path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/Kconfig | 5
-rw-r--r--  drivers/block/brd.c | 39
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 16
-rw-r--r--  drivers/block/floppy.c | 10
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 18
-rw-r--r--  drivers/block/nbd.c | 443
-rw-r--r--  drivers/block/null_blk.c | 1
-rw-r--r--  drivers/block/pktcdvd.c | 49
-rw-r--r--  drivers/block/skd_main.c | 238
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 10
-rw-r--r--  drivers/block/xen-blkfront.c | 3
-rw-r--r--  drivers/ide/ide-atapi.c | 6
-rw-r--r--  drivers/ide/ide-cd.c | 46
-rw-r--r--  drivers/ide/ide-cd.h | 2
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 6
-rw-r--r--  drivers/ide/ide-io.c | 6
-rw-r--r--  drivers/ide/ide-pm.c | 4
-rw-r--r--  drivers/lightnvm/Makefile | 2
-rw-r--r--  drivers/lightnvm/core.c | 377
-rw-r--r--  drivers/lightnvm/gennvm.c | 645
-rw-r--r--  drivers/lightnvm/gennvm.h | 34
-rw-r--r--  drivers/lightnvm/lightnvm.h | 35
-rw-r--r--  drivers/lightnvm/rrpc.c | 514
-rw-r--r--  drivers/lightnvm/rrpc.h | 65
-rw-r--r--  drivers/lightnvm/sysblk.c | 98
-rw-r--r--  drivers/lightnvm/sysfs.c | 198
-rw-r--r--  drivers/md/bcache/btree.c | 4
-rw-r--r--  drivers/md/bcache/debug.c | 15
-rw-r--r--  drivers/md/bcache/io.c | 4
-rw-r--r--  drivers/md/bcache/journal.c | 4
-rw-r--r--  drivers/md/bcache/movinggc.c | 6
-rw-r--r--  drivers/md/bcache/request.c | 8
-rw-r--r--  drivers/md/bcache/super.c | 16
-rw-r--r--  drivers/md/bcache/writeback.c | 5
-rw-r--r--  drivers/md/bcache/writeback.h | 3
-rw-r--r--  drivers/md/dm-bufio.c | 6
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-log.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 4
-rw-r--r--  drivers/md/dm-rq.c | 52
-rw-r--r--  drivers/md/dm-snap-persistent.c | 4
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/md/md.c | 4
-rw-r--r--  drivers/md/multipath.c | 2
-rw-r--r--  drivers/md/raid5-cache.c | 6
-rw-r--r--  drivers/md/raid5.c | 11
-rw-r--r--  drivers/memstick/core/ms_block.c | 2
-rw-r--r--  drivers/memstick/core/mspro_block.c | 2
-rw-r--r--  drivers/mmc/card/block.c | 4
-rw-r--r--  drivers/mmc/card/queue.c | 4
-rw-r--r--  drivers/nvme/host/Kconfig | 17
-rw-r--r--  drivers/nvme/host/Makefile | 3
-rw-r--r--  drivers/nvme/host/core.c | 150
-rw-r--r--  drivers/nvme/host/fabrics.c | 33
-rw-r--r--  drivers/nvme/host/fc.c | 2586
-rw-r--r--  drivers/nvme/host/lightnvm.c | 223
-rw-r--r--  drivers/nvme/host/nvme.h | 51
-rw-r--r--  drivers/nvme/host/pci.c | 47
-rw-r--r--  drivers/nvme/host/rdma.c | 43
-rw-r--r--  drivers/nvme/host/scsi.c | 11
-rw-r--r--  drivers/nvme/target/Kconfig | 24
-rw-r--r--  drivers/nvme/target/Makefile | 4
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 3
-rw-r--r--  drivers/nvme/target/configfs.c | 20
-rw-r--r--  drivers/nvme/target/core.c | 22
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 14
-rw-r--r--  drivers/nvme/target/fc.c | 2288
-rw-r--r--  drivers/nvme/target/fcloop.c | 1148
-rw-r--r--  drivers/nvme/target/io-cmd.c | 39
-rw-r--r--  drivers/nvme/target/loop.c | 26
-rw-r--r--  drivers/nvme/target/nvmet.h | 8
-rw-r--r--  drivers/nvme/target/rdma.c | 8
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 8
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 2
-rw-r--r--  drivers/scsi/osst.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 3
-rw-r--r--  drivers/scsi/scsi_error.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 92
-rw-r--r--  drivers/scsi/sd.c | 181
-rw-r--r--  drivers/scsi/sd.h | 70
-rw-r--r--  drivers/scsi/sd_zbc.c | 648
-rw-r--r--  drivers/scsi/st.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 6
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/types.h | 1
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw.c | 1
-rw-r--r--  drivers/target/target_core_iblock.c | 8
-rw-r--r--  drivers/target/target_core_pscsi.c | 8
94 files changed, 8859 insertions(+), 2000 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b6ef86..223ff2fcae7e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -384,9 +384,12 @@ config BLK_DEV_RAM_DAX
allocated from highmem (only a problem for highmem systems).
config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media"
+ tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
help
+ Note: This driver is deprecated and will be removed from the
+ kernel in the near future!
+
If you have a CDROM/DVD drive that supports packet writing, say
Y to include support. It should work with any MMC/Mt Fuji
compliant ATAPI or SCSI drive, which is just about any newer
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c76d4016eeb..ad793f35632c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -395,44 +395,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
#define brd_direct_access NULL
#endif
-static int brd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int error;
- struct brd_device *brd = bdev->bd_disk->private_data;
-
- if (cmd != BLKFLSBUF)
- return -ENOTTY;
-
- /*
- * ram device BLKFLSBUF has special semantics, we want to actually
- * release and destroy the ramdisk data.
- */
- mutex_lock(&brd_mutex);
- mutex_lock(&bdev->bd_mutex);
- error = -EBUSY;
- if (bdev->bd_openers <= 1) {
- /*
- * Kill the cache first, so it isn't written back to the
- * device.
- *
- * Another thread might instantiate more buffercache here,
- * but there is not much we can do to close that race.
- */
- kill_bdev(bdev);
- brd_free_pages(brd);
- error = 0;
- }
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&brd_mutex);
-
- return error;
-}
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
- .ioctl = brd_ioctl,
.direct_access = brd_direct_access,
};
@@ -443,8 +408,8 @@ static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-module_param(rd_size, int, S_IRUGO);
+unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
static int max_part = 1;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 2d3d50ab74bf..8d7bcfa49c12 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -148,7 +148,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
op_flags |= REQ_FUA | REQ_PREFLUSH;
- op_flags |= REQ_SYNC | REQ_NOIDLE;
+ op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 942384f34e22..c7728dd77230 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
bio->bi_bdev = device->ldev->backing_bdev;
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1648,20 +1648,8 @@ next_bio:
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
- if (!bio_add_page(bio, page, len, 0)) {
- /* A single page must always be possible!
- * But in case it fails anyways,
- * we deal with it, and complain (below). */
- if (bio->bi_vcnt == 0) {
- drbd_err(device,
- "bio_add_page failed for len=%u, "
- "bi_vcnt=0 (bi_sector=%llu)\n",
- len, (uint64_t)bio->bi_iter.bi_sector);
- err = -ENOSPC;
- goto fail;
- }
+ if (!bio_add_page(bio, page, len, 0))
goto next_bio;
- }
data_size -= len;
sector += len >> 9;
--nr_pages;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4ced4a2..a391a3cfb3fe 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3806,14 +3806,10 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio);
- bio.bi_io_vec = &bio_vec;
- bio_vec.bv_page = page;
- bio_vec.bv_len = size;
- bio_vec.bv_offset = 0;
- bio.bi_vcnt = 1;
- bio.bi_iter.bi_size = size;
+ bio_init(&bio, &bio_vec, 1);
bio.bi_bdev = bdev;
+ bio_add_page(&bio, page, size, 0);
+
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
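
The floppy hunk above illustrates the two-step on-stack bio setup used throughout this series: bio_init() now takes the bvec table and its capacity, and bio_add_page() fills in the data page. A minimal sketch of the same pattern, with a hypothetical helper name and synchronous submission assumed for clarity:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Sketch only: read one page synchronously via an on-stack bio. */
static int example_read_one_page(struct block_device *bdev,
				 struct page *page, unsigned int len)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);		/* bvec table + max segments */
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = 0;
	bio_add_page(&bio, page, len, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	/* The bio lives on the stack, so wait for completion before returning. */
	return submit_bio_wait(&bio);
}
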
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1b7a90ba11..4af818766797 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1646,7 +1646,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
if (lo->lo_state != Lo_bound)
- return -EIO;
+ return BLK_MQ_RQ_QUEUE_ERROR;
switch (req_op(cmd->rq)) {
case REQ_OP_FLUSH:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3cfd879267b2..f96ab717534c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2035,18 +2035,14 @@ static int exec_drive_taskfile(struct driver_data *dd,
taskout = req_task->out_size;
taskin = req_task->in_size;
/* 130560 = 512 * 0xFF*/
- if (taskin > 130560 || taskout > 130560) {
- err = -EINVAL;
- goto abort;
- }
+ if (taskin > 130560 || taskout > 130560)
+ return -EINVAL;
if (taskout) {
outbuf = memdup_user(buf + outtotal, taskout);
- if (IS_ERR(outbuf)) {
- err = PTR_ERR(outbuf);
- outbuf = NULL;
- goto abort;
- }
+ if (IS_ERR(outbuf))
+ return PTR_ERR(outbuf);
+
outbuf_dma = pci_map_single(dd->pdev,
outbuf,
taskout,
@@ -3937,8 +3933,10 @@ static int mtip_block_initialize(struct driver_data *dd)
/* Generate the disk name, implemented same as in sd.c */
do {
- if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
+ rv = -ENOMEM;
goto ida_get_error;
+ }
spin_lock(&rssd_index_lock);
rv = ida_get_new(&rssd_index_ida, &index);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7a1048755914..99c84468f154 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,26 +41,34 @@
#include <linux/nbd.h>
+struct nbd_sock {
+ struct socket *sock;
+ struct mutex tx_lock;
+};
+
#define NBD_TIMEDOUT 0
#define NBD_DISCONNECT_REQUESTED 1
+#define NBD_DISCONNECTED 2
+#define NBD_RUNNING 3
struct nbd_device {
u32 flags;
unsigned long runtime_flags;
- struct socket * sock; /* If == NULL, device is not ready, yet */
+ struct nbd_sock **socks;
int magic;
struct blk_mq_tag_set tag_set;
- struct mutex tx_lock;
+ struct mutex config_lock;
struct gendisk *disk;
- int blksize;
+ int num_connections;
+ atomic_t recv_threads;
+ wait_queue_head_t recv_wq;
+ loff_t blksize;
loff_t bytesize;
- /* protects initialization and shutdown of the socket */
- spinlock_t sock_lock;
struct task_struct *task_recv;
- struct task_struct *task_send;
+ struct task_struct *task_setup;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
@@ -69,7 +77,7 @@ struct nbd_device {
struct nbd_cmd {
struct nbd_device *nbd;
- struct list_head list;
+ struct completion send_complete;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -126,7 +134,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
- int blocksize, int nr_blocks)
+ loff_t blocksize, loff_t nr_blocks)
{
int ret;
@@ -135,7 +143,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
return ret;
nbd->blksize = blocksize;
- nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+ nbd->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd, bdev);
@@ -159,22 +167,20 @@ static void nbd_end_request(struct nbd_cmd *cmd)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- struct socket *sock;
-
- spin_lock(&nbd->sock_lock);
+ int i;
- if (!nbd->sock) {
- spin_unlock(&nbd->sock_lock);
+ if (nbd->num_connections == 0)
+ return;
+ if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
return;
- }
-
- sock = nbd->sock;
- dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- nbd->sock = NULL;
- spin_unlock(&nbd->sock_lock);
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
+ for (i = 0; i < nbd->num_connections; i++) {
+ struct nbd_sock *nsock = nbd->socks[i];
+ mutex_lock(&nsock->tx_lock);
+ kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
+ mutex_unlock(&nsock->tx_lock);
+ }
+ dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
@@ -182,42 +188,38 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
- struct socket *sock = NULL;
-
- spin_lock(&nbd->sock_lock);
+ dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
-
- if (nbd->sock) {
- sock = nbd->sock;
- get_file(sock->file);
- }
-
- spin_unlock(&nbd->sock_lock);
- if (sock) {
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
- }
-
req->errors++;
- dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+
+ /*
+ * If our disconnect packet times out then we're already holding the
+ * config_lock and could deadlock here, so just set an error and return,
+ * we'll handle shutting everything down later.
+ */
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ return BLK_EH_HANDLED;
+ mutex_lock(&nbd->config_lock);
+ sock_shutdown(nbd);
+ mutex_unlock(&nbd->config_lock);
return BLK_EH_HANDLED;
}
/*
* Send or receive packet.
*/
-static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
- int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
+ int size, int msg_flags)
{
- struct socket *sock = nbd->sock;
+ struct socket *sock = nbd->socks[index]->sock;
int result;
struct msghdr msg;
struct kvec iov;
unsigned long pflags = current->flags;
if (unlikely(!sock)) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted %s on closed socket in sock_xmit\n",
(send ? "send" : "recv"));
return -EINVAL;
@@ -254,29 +256,29 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
return result;
}
-static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
- int flags)
+static inline int sock_send_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec, int flags)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+ result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
bvec->bv_len, flags);
kunmap(bvec->bv_page);
return result;
}
/* always call with the tx_lock held */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
int result, flags;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
+ struct bio *bio;
u32 type;
+ u32 tag = blk_mq_unique_tag(req);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- type = NBD_CMD_DISC;
- else if (req_op(req) == REQ_OP_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
@@ -288,73 +290,89 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(type);
- if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
+ if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &req->tag, sizeof(req->tag));
+ memcpy(request.handle, &tag, sizeof(tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
- result = sock_xmit(nbd, 1, &request, sizeof(request),
+ result = sock_xmit(nbd, index, 1, &request, sizeof(request),
(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
return -EIO;
}
- if (type == NBD_CMD_WRITE) {
- struct req_iterator iter;
+ if (type != NBD_CMD_WRITE)
+ return 0;
+
+ flags = 0;
+ bio = req->bio;
+ while (bio) {
+ struct bio *next = bio->bi_next;
+ struct bvec_iter iter;
struct bio_vec bvec;
- /*
- * we are really probing at internals to determine
- * whether to set MSG_MORE or not...
- */
- rq_for_each_segment(bvec, req, iter) {
- flags = 0;
- if (!rq_iter_last(bvec, iter))
+
+ bio_for_each_segment(bvec, bio, iter) {
+ bool is_last = !next && bio_iter_last(bvec, iter);
+
+ if (is_last)
flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
- result = sock_send_bvec(nbd, &bvec, flags);
+ result = sock_send_bvec(nbd, index, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
return -EIO;
}
+ /*
+ * The completion might already have come in,
+ * so break for the last one instead of letting
+ * the iterator do it. This prevents use-after-free
+ * of the bio.
+ */
+ if (is_last)
+ break;
}
+ bio = next;
}
return 0;
}
-static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
+static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
- MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
+ bvec->bv_len, MSG_WAITALL);
kunmap(bvec->bv_page);
return result;
}
/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
int result;
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
u16 hwq;
- int tag;
+ u32 tag;
reply.magic = 0;
- result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
- "Receive control failed (result %d)\n", result);
+ if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
+ !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ dev_err(disk_to_dev(nbd->disk),
+ "Receive control failed (result %d)\n", result);
return ERR_PTR(result);
}
@@ -364,7 +382,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(int));
+ memcpy(&tag, reply.handle, sizeof(u32));
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -376,7 +394,6 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
-
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
@@ -390,7 +407,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, &bvec);
+ result = sock_recv_bvec(nbd, index, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -400,6 +417,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
cmd, bvec.bv_len);
}
+ } else {
+ /* See the comment in nbd_queue_rq. */
+ wait_for_completion(&cmd->send_complete);
}
return cmd;
}
@@ -418,25 +438,24 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
+struct recv_thread_args {
+ struct work_struct work;
+ struct nbd_device *nbd;
+ int index;
+};
+
+static void recv_work(struct work_struct *work)
{
+ struct recv_thread_args *args = container_of(work,
+ struct recv_thread_args,
+ work);
+ struct nbd_device *nbd = args->nbd;
struct nbd_cmd *cmd;
- int ret;
+ int ret = 0;
BUG_ON(nbd->magic != NBD_MAGIC);
-
- sk_set_memalloc(nbd->sock->sk);
-
- ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
- if (ret) {
- dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- return ret;
- }
-
- nbd_size_update(nbd, bdev);
-
while (1) {
- cmd = nbd_read_stat(nbd);
+ cmd = nbd_read_stat(nbd, args->index);
if (IS_ERR(cmd)) {
ret = PTR_ERR(cmd);
break;
@@ -445,10 +464,14 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_end_request(cmd);
}
- nbd_size_clear(nbd, bdev);
-
- device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- return ret;
+ /*
+ * We got an error, shut everybody down if this wasn't the result of a
+ * disconnect request.
+ */
+ if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ sock_shutdown(nbd);
+ atomic_dec(&nbd->recv_threads);
+ wake_up(&nbd->recv_wq);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
@@ -466,51 +489,60 @@ static void nbd_clear_que(struct nbd_device *nbd)
{
BUG_ON(nbd->magic != NBD_MAGIC);
- /*
- * Because we have set nbd->sock to NULL under the tx_lock, all
- * modifications to the list must have completed by now.
- */
- BUG_ON(nbd->sock);
-
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
-static void nbd_handle_cmd(struct nbd_cmd *cmd)
+static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
+ struct nbd_sock *nsock;
+
+ if (index >= nbd->num_connections) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on invalid socket\n");
+ goto error_out;
+ }
+
+ if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
+ goto error_out;
+ }
- if (req->cmd_type != REQ_TYPE_FS)
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_DRV_PRIV)
goto error_out;
- if (rq_data_dir(req) == WRITE &&
+ if (req->cmd_type == REQ_TYPE_FS &&
+ rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err(disk_to_dev(nbd->disk),
- "Write on read-only\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
goto error_out;
}
req->errors = 0;
- mutex_lock(&nbd->tx_lock);
- nbd->task_send = current;
- if (unlikely(!nbd->sock)) {
- mutex_unlock(&nbd->tx_lock);
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ nsock = nbd->socks[index];
+ mutex_lock(&nsock->tx_lock);
+ if (unlikely(!nsock->sock)) {
+ mutex_unlock(&nsock->tx_lock);
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
goto error_out;
}
- if (nbd_send_cmd(nbd, cmd) != 0) {
- dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
+ if (nbd_send_cmd(nbd, cmd, index) != 0) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed\n");
req->errors++;
nbd_end_request(cmd);
}
- nbd->task_send = NULL;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nsock->tx_lock);
return;
@@ -524,39 +556,70 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ /*
+ * Since we look at the bio's to send the request over the network we
+ * need to make sure the completion work doesn't mark this request done
+ * before we are done doing our send. This keeps us from dereferencing
+ * freed data if we have particularly fast completions (ie we get the
+ * completion before we exit sock_xmit on the last bvec) or in the case
+ * that the server is misbehaving (or there was an error) before we're
+ * done sending everything over the wire.
+ */
+ init_completion(&cmd->send_complete);
blk_mq_start_request(bd->rq);
- nbd_handle_cmd(cmd);
+ nbd_handle_cmd(cmd, hctx->queue_num);
+ complete(&cmd->send_complete);
+
return BLK_MQ_RQ_QUEUE_OK;
}
-static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
- int ret = 0;
-
- spin_lock_irq(&nbd->sock_lock);
+ struct nbd_sock **socks;
+ struct nbd_sock *nsock;
- if (nbd->sock) {
- ret = -EBUSY;
- goto out;
+ if (!nbd->task_setup)
+ nbd->task_setup = current;
+ if (nbd->task_setup != current) {
+ dev_err(disk_to_dev(nbd->disk),
+ "Device being setup by another task");
+ return -EINVAL;
}
- nbd->sock = sock;
+ socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
+ sizeof(struct nbd_sock *), GFP_KERNEL);
+ if (!socks)
+ return -ENOMEM;
+ nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+ if (!nsock)
+ return -ENOMEM;
-out:
- spin_unlock_irq(&nbd->sock_lock);
+ nbd->socks = socks;
+
+ mutex_init(&nsock->tx_lock);
+ nsock->sock = sock;
+ socks[nbd->num_connections++] = nsock;
- return ret;
+ return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
nbd->runtime_flags = 0;
nbd->blksize = 1024;
nbd->bytesize = 0;
set_capacity(nbd->disk, 0);
nbd->flags = 0;
nbd->tag_set.timeout = 0;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
@@ -582,48 +645,68 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
blk_queue_write_cache(nbd->disk->queue, false, false);
}
+static void send_disconnects(struct nbd_device *nbd)
+{
+ struct nbd_request request = {};
+ int i, ret;
+
+ request.magic = htonl(NBD_REQUEST_MAGIC);
+ request.type = htonl(NBD_CMD_DISC);
+
+ for (i = 0; i < nbd->num_connections; i++) {
+ ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+ if (ret <= 0)
+ dev_err(disk_to_dev(nbd->disk),
+ "Send disconnect failed %d\n", ret);
+ }
+}
+
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
-/* Must be called with tx_lock held */
-
+/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case NBD_DISCONNECT: {
- struct request *sreq;
-
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
- sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
- if (IS_ERR(sreq))
- return -ENOMEM;
-
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
fsync_bdev(bdev);
- mutex_lock(&nbd->tx_lock);
- sreq->cmd_type = REQ_TYPE_DRV_PRIV;
+ mutex_lock(&nbd->config_lock);
/* Check again after getting mutex back. */
- if (!nbd->sock) {
- blk_mq_free_request(sreq);
+ if (!nbd->socks)
return -EINVAL;
- }
- set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
-
- nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
- blk_mq_free_request(sreq);
+ if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
+ &nbd->runtime_flags))
+ send_disconnects(nbd);
return 0;
}
-
+
case NBD_CLEAR_SOCK:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
+ nbd_bdev_reset(bdev);
+ /*
+ * We want to give the run thread a chance to wait for everybody
+ * to clean up and then do its own cleanup.
+ */
+ if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
+ }
return 0;
case NBD_SET_SOCK: {
@@ -633,7 +716,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!sock)
return err;
- err = nbd_set_socket(nbd, sock);
+ err = nbd_add_socket(nbd, sock);
if (!err && max_part)
bdev->bd_invalidated = 1;
@@ -648,7 +731,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SIZE:
return nbd_size_set(nbd, bdev, nbd->blksize,
- arg / nbd->blksize);
+ div_s64(arg, nbd->blksize));
case NBD_SET_SIZE_BLOCKS:
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -662,26 +745,61 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_DO_IT: {
- int error;
+ struct recv_thread_args *args;
+ int num_connections = nbd->num_connections;
+ int error = 0, i;
if (nbd->task_recv)
return -EBUSY;
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
+ if (num_connections > 1 &&
+ !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
+ dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
+ error = -EINVAL;
+ goto out_err;
+ }
- /* We have to claim the device under the lock */
+ set_bit(NBD_RUNNING, &nbd->runtime_flags);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
+ args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
+ if (!args) {
+ error = -ENOMEM;
+ goto out_err;
+ }
nbd->task_recv = current;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
nbd_parse_flags(nbd, bdev);
+ error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
+ if (error) {
+ dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+ goto out_recv;
+ }
+
+ nbd_size_update(nbd, bdev);
+
nbd_dev_dbg_init(nbd);
- error = nbd_thread_recv(nbd, bdev);
+ for (i = 0; i < num_connections; i++) {
+ sk_set_memalloc(nbd->socks[i]->sock->sk);
+ atomic_inc(&nbd->recv_threads);
+ INIT_WORK(&args[i].work, recv_work);
+ args[i].nbd = nbd;
+ args[i].index = i;
+ queue_work(system_long_wq, &args[i].work);
+ }
+ wait_event_interruptible(nbd->recv_wq,
+ atomic_read(&nbd->recv_threads) == 0);
+ for (i = 0; i < num_connections; i++)
+ flush_work(&args[i].work);
nbd_dev_dbg_close(nbd);
-
- mutex_lock(&nbd->tx_lock);
+ nbd_size_clear(nbd, bdev);
+ device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+out_recv:
+ mutex_lock(&nbd->config_lock);
nbd->task_recv = NULL;
-
+out_err:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
@@ -694,7 +812,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
error = -ETIMEDOUT;
nbd_reset(nbd);
-
return error;
}
@@ -726,9 +843,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
BUG_ON(nbd->magic != NBD_MAGIC);
- mutex_lock(&nbd->tx_lock);
+ mutex_lock(&nbd->config_lock);
error = __nbd_ioctl(bdev, nbd, cmd, arg);
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
return error;
}
@@ -748,8 +865,6 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
if (nbd->task_recv)
seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
- if (nbd->task_send)
- seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
return 0;
}
@@ -817,7 +932,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
@@ -873,9 +988,7 @@ static int nbd_init_request(void *data, struct request *rq,
unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
cmd->nbd = data;
- INIT_LIST_HEAD(&cmd->list);
return 0;
}
@@ -985,13 +1098,13 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
- spin_lock_init(&nbd_dev[i].sock_lock);
- mutex_init(&nbd_dev[i].tx_lock);
+ mutex_init(&nbd_dev[i].config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
+ init_waitqueue_head(&nbd_dev[i].recv_wq);
nbd_reset(&nbd_dev[i]);
add_disk(disk);
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ba6f4a2e73db..4943ee22716e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -577,6 +577,7 @@ static void null_nvm_unregister(struct nullb *nullb)
#else
static int null_nvm_register(struct nullb *nullb)
{
+ pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 90fa4ac149db..95c98de92971 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -721,7 +721,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
rq->timeout = 60*HZ;
if (cgc->quiet)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
if (rq->errors)
@@ -944,39 +944,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
}
-/*
- * Copy all data for this packet to pkt->pages[], so that
- * a) The number of required segments for the write bio is minimized, which
- * is necessary for some scsi controllers.
- * b) The data can be used as cache to avoid read requests if we receive a
- * new write request for the same zone.
- */
-static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
-{
- int f, p, offs;
-
- /* Copy all data to pkt->pages[] */
- p = 0;
- offs = 0;
- for (f = 0; f < pkt->frames; f++) {
- if (bvec[f].bv_page != pkt->pages[p]) {
- void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
- void *vto = page_address(pkt->pages[p]) + offs;
- memcpy(vto, vfrom, CD_FRAMESIZE);
- kunmap_atomic(vfrom);
- bvec[f].bv_page = pkt->pages[p];
- bvec[f].bv_offset = offs;
- } else {
- BUG_ON(bvec[f].bv_offset != offs);
- }
- offs += CD_FRAMESIZE;
- if (offs >= PAGE_SIZE) {
- offs = 0;
- p++;
- }
- }
-}
-
static void pkt_end_io_read(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
@@ -1298,7 +1265,6 @@ try_next_bio:
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
@@ -1308,9 +1274,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
- bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+ if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
BUG();
}
pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
@@ -1327,12 +1294,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
- pkt_make_local_copy(pkt, bvec);
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt->cache_valid = 1;
- } else {
+ else
pkt->cache_valid = 0;
- }
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3822eae102db..abf805e332e2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -36,7 +36,6 @@
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
-#include <linux/scatterlist.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
@@ -270,8 +269,6 @@ struct skd_device {
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
- skd_irq_type_t irq_type;
- u32 msix_count;
struct skd_msix_entry *msix_entries;
struct pci_dev *pdev;
@@ -2138,12 +2135,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__,
- i, bp[i + 0], bp[i + 1], bp[i + 2],
- bp[i + 3], bp[i + 4], bp[i + 5],
- bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d msg[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2164,7 +2157,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
qcmd |= FIT_QCMD_MSGSIZE_64;
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
@@ -2177,11 +2169,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__, i,
- bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
- bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2955,8 +2944,8 @@ static void skd_completion_worker(struct work_struct *work)
static void skd_isr_msg_from_dev(struct skd_device *skdev);
-irqreturn_t
-static skd_isr(int irq, void *ptr)
+static irqreturn_t
+skd_isr(int irq, void *ptr)
{
struct skd_device *skdev;
u32 intstat;
@@ -3821,10 +3810,6 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
*/
struct skd_msix_entry {
- int have_irq;
- u32 vector;
- u32 entry;
- struct skd_device *rsp;
char isr_name[30];
};
@@ -3853,193 +3838,121 @@ static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
{ "(Queue Full 3)", skd_qfull_isr },
};
-static void skd_release_msix(struct skd_device *skdev)
-{
- struct skd_msix_entry *qentry;
- int i;
-
- if (skdev->msix_entries) {
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
-
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
- }
-
- kfree(skdev->msix_entries);
- }
-
- if (skdev->msix_count)
- pci_disable_msix(skdev->pdev);
-
- skdev->msix_count = 0;
- skdev->msix_entries = NULL;
-}
-
static int skd_acquire_msix(struct skd_device *skdev)
{
int i, rc;
struct pci_dev *pdev = skdev->pdev;
- struct msix_entry *entries;
- struct skd_msix_entry *qentry;
-
- entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
- entries[i].entry = i;
- rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc) {
+ rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
pr_err("(%s): failed to enable MSI-X %d\n",
skd_name(skdev), rc);
- goto msix_out;
+ goto out;
}
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
- skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
- skdev->msix_count, GFP_KERNEL);
+ skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
+ sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
pr_err("(%s): msix table allocation error\n",
skd_name(skdev));
- goto msix_out;
- }
-
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
- qentry->rsp = NULL;
- qentry->have_irq = 0;
- pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name,
- i, qentry->vector, qentry->entry);
+ goto out;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ struct skd_msix_entry *qentry = &skdev->msix_entries[i];
+
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
"%s%d-msix %s", DRV_NAME, skdev->devno,
msix_entries[i].name);
- rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
+
+ rc = devm_request_irq(&skdev->pdev->dev,
+ pci_irq_vector(skdev->pdev, i),
+ msix_entries[i].handler, 0,
+ qentry->isr_name, skdev);
if (rc) {
pr_err("(%s): Unable to register(%d) MSI-X "
"handler %d: %s\n",
skd_name(skdev), rc, i, qentry->isr_name);
goto msix_out;
- } else {
- qentry->have_irq = 1;
- qentry->rsp = skdev;
}
}
+
pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, skdev->msix_count);
+ pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
- if (entries)
- kfree(entries);
- skd_release_msix(skdev);
+ while (--i >= 0)
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
+out:
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
+ struct pci_dev *pdev = skdev->pdev;
+ unsigned int irq_flag = PCI_IRQ_LEGACY;
int rc;
- struct pci_dev *pdev;
-
- pdev = skdev->pdev;
- skdev->msix_count = 0;
-RETRY_IRQ_TYPE:
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
+ if (skd_isr_type == SKD_IRQ_MSIX) {
rc = skd_acquire_msix(skdev);
if (!rc)
- pr_info("(%s): MSI-X %d irqs enabled\n",
- skd_name(skdev), skdev->msix_count);
- else {
- pr_err(
- "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_MSI;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_MSI:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
- DRV_NAME, skdev->devno);
- rc = pci_enable_msi_range(pdev, 1, 1);
- if (rc > 0) {
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
- skdev->isr_name, skdev);
- if (rc) {
- pci_disable_msi(pdev);
- pr_err(
- "(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
- goto RETRY_IRQ_LEGACY;
- }
- pr_info("(%s): MSI irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- } else {
-RETRY_IRQ_LEGACY:
- pr_err(
- "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_LEGACY:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name),
- "%s%d-legacy", DRV_NAME, skdev->devno);
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- IRQF_SHARED, skdev->isr_name, skdev);
- if (!rc)
- pr_info("(%s): LEGACY irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- else
- pr_err("(%s): request LEGACY irq error %d\n",
- skd_name(skdev), rc);
- break;
- default:
- pr_info("(%s): irq_type %d invalid, re-set to %d\n",
- skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
+ return 0;
+
+ pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+ skd_name(skdev), rc);
}
- return rc;
+
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
+ skdev->devno);
+
+ if (skd_isr_type != SKD_IRQ_LEGACY)
+ irq_flag |= PCI_IRQ_MSI;
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
+ if (rc < 0) {
+ pr_err("(%s): failed to allocate the MSI interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+ pdev->msi_enabled ? 0 : IRQF_SHARED,
+ skdev->isr_name, skdev);
+ if (rc) {
+ pci_free_irq_vectors(pdev);
+ pr_err("(%s): failed to allocate interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
- skd_release_msix(skdev);
- break;
- case SKD_IRQ_MSI:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- pci_disable_msi(skdev->pdev);
- break;
- case SKD_IRQ_LEGACY:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- break;
- default:
- pr_err("(%s): wrong irq type %d!",
- skd_name(skdev), skdev->irq_type);
- break;
+ struct pci_dev *pdev = skdev->pdev;
+
+ if (skdev->msix_entries) {
+ int i;
+
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ skdev);
+ }
+
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
+ } else {
+ devm_free_irq(&pdev->dev, pdev->irq, skdev);
}
+
+ pci_free_irq_vectors(pdev);
}
/*
@@ -4402,7 +4315,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- skdev->irq_type = skd_isr_type;
sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
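
The skd_main.c changes above replace the hand-rolled MSI-X/MSI/legacy retry state machine with the generic pci_alloc_irq_vectors() API. A condensed sketch of that allocation pattern follows; the function and device names are hypothetical and not taken from this driver:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Sketch only: allocate 1-4 vectors, preferring MSI-X, then MSI, then INTx. */
static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	int i, ret, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to its Linux IRQ number. */
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       handler, 0, "example", data);
		if (ret)
			goto err_free;
	}
	return nvec;

err_free:
	while (--i >= 0)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return ret;
}

As in the skd hunk, a driver that may fall back to a shared legacy interrupt would pass IRQF_SHARED when MSI/MSI-X is not enabled.
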
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be90e15854ed..46f4c719fed9 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
+ if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee752597..726c32e35db9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
case BLKIF_OP_WRITE:
ring->st_wr_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_ODIRECT;
+ operation_flags = REQ_SYNC | REQ_IDLE;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_FLUSH;
+ operation_flags = REQ_PREFLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
- if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
}
/* Wait on all outstanding I/O's and once that has been completed
- * issue the WRITE_FLUSH.
+ * issue the flush.
*/
if (drain)
xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation_flags != WRITE_FLUSH);
+ BUG_ON(operation_flags != REQ_PREFLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9908597c5209..c000fdf048b2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2043,8 +2043,9 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_mq_requeue_request(req);
+ blk_mq_requeue_request(req, false);
}
+ blk_mq_start_stopped_hw_queues(info->rq, true);
blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 05352f490d60..f90ea221f7f2 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -211,7 +211,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
- sense_rq->cmd_flags |= REQ_PREEMPT;
+ sense_rq->rq_flags |= RQF_PREEMPT;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
@@ -295,7 +295,7 @@ int ide_cd_expiry(ide_drive_t *drive)
wait = ATAPI_WAIT_PC;
break;
default:
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
wait = 0;
@@ -375,7 +375,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
}
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 1;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad296ed..9cbd217bc0c9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -98,7 +98,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
struct request_sense *sense = &drive->sense_data;
int log = 0;
- if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
+ if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
return 0;
ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
@@ -291,7 +291,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* (probably while trying to recover from a former error).
* Just give up.
*/
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 2;
}
@@ -311,7 +311,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
cdrom_saw_media_change(drive);
if (rq->cmd_type == REQ_TYPE_FS &&
- !(rq->cmd_flags & REQ_QUIET))
+ !(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
}
@@ -346,7 +346,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in retrying after an illegal request or data
* protect error.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "command error", stat);
do_end_request = 1;
break;
@@ -355,14 +355,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in re-trying a zillion times on a bad sector.
* If we got here the error is not correctable.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error "
"(bad sector)", stat);
do_end_request = 1;
break;
case BLANK_CHECK:
/* disk appears blank? */
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error (blank)",
stat);
do_end_request = 1;
@@ -380,7 +380,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
if (rq->cmd_type != REQ_TYPE_FS) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
@@ -422,19 +422,19 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
struct request_sense *sense, int timeout,
- unsigned int cmd_flags)
+ req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
struct request_sense local_sense;
int retries = 10;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
if (!sense)
sense = &local_sense;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
- "cmd_flags: 0x%x",
- cmd[0], write, timeout, cmd_flags);
+ "rq_flags: 0x%x",
+ cmd[0], write, timeout, rq_flags);
/* start of retry loop */
do {
@@ -446,7 +446,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
memcpy(rq->cmd, cmd, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_ATA_PC;
rq->sense = sense;
- rq->cmd_flags |= cmd_flags;
+ rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {
error = blk_rq_map_kern(drive->queue, rq, buffer,
@@ -462,14 +462,14 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = rq->resid_len;
- flags = rq->cmd_flags;
+ flags = rq->rq_flags;
blk_put_request(rq);
/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
- if (flags & REQ_FAILED) {
+ if (flags & RQF_FAILED) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
@@ -494,10 +494,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
/* end of retry loop */
- } while ((flags & REQ_FAILED) && retries >= 0);
+ } while ((flags & RQF_FAILED) && retries >= 0);
/* return an error if the command failed */
- return (flags & REQ_FAILED) ? -EIO : 0;
+ return (flags & RQF_FAILED) ? -EIO : 0;
}
/*
@@ -589,7 +589,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
"(%u bytes)\n", drive->name, __func__,
cmd->nleft);
if (!write)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
@@ -607,7 +607,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
if (!uptodate)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
}
goto out_end;
}
@@ -745,9 +745,9 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
rq->cmd[0], rq->cmd_type);
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
else
- rq->cmd_flags &= ~REQ_FAILED;
+ rq->rq_flags &= ~RQF_FAILED;
drive->dma = 0;
@@ -867,7 +867,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -890,7 +890,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
- REQ_QUIET);
+ RQF_QUIET);
if (stat)
return stat;
@@ -943,7 +943,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1efc936f5b66..eea60c986c4f 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -101,7 +101,7 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, unsigned int);
+ unsigned *, struct request_sense *, int, req_flags_t);
int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 5887a7a09e37..f085e3a2e1d6 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -305,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_flags = REQ_QUIET;
+ rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
/*
@@ -449,7 +449,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
ide_drive_t *drive = cdi->handle;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
unsigned len = cgc->buflen;
if (cgc->timeout <= 0)
@@ -463,7 +463,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
memset(cgc->sense, 0, sizeof(struct request_sense));
if (cgc->quiet)
- flags |= REQ_QUIET;
+ flags |= RQF_QUIET;
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 669ea1e45795..6360bbd37efe 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
- BUG_ON(!(rq->cmd_flags & REQ_STARTED));
+ BUG_ON(!(rq->rq_flags & RQF_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
@@ -316,7 +316,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
/* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
goto kill_rq;
}
@@ -539,7 +539,7 @@ repeat:
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PREEMPT) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index e34af488693a..a015acdffb39 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -53,7 +53,7 @@ static int ide_pm_execute_rq(struct request *rq)
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
rq->errors = -ENXIO;
__blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
@@ -90,7 +90,7 @@ int generic_ide_resume(struct device *dev)
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
- rq->cmd_flags |= REQ_PREEMPT;
+ rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
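
The IDE hunks above all make the same mechanical change: request-handling state moves out of rq->cmd_flags (REQ_QUIET, REQ_FAILED, REQ_STARTED, REQ_PREEMPT) into the new req_flags_t field rq->rq_flags as the corresponding RQF_* values. A minimal sketch of the new convention, modeled on the ide_cdrom_reset() hunk above; the function name is illustrative and, as in the original, error handling of blk_get_request() is omitted:

static int example_issue_quiet_request(ide_drive_t *drive, struct gendisk *disk)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->rq_flags = RQF_QUIET;	/* was: rq->cmd_flags = REQ_QUIET */

	ret = blk_execute_rq(drive->queue, disk, rq, 0);
	blk_put_request(rq);
	return ret;
}
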
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 1f6b6521016a..a7a0a22cf1a5 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o
+obj-$(CONFIG_NVM) := core.o sysblk.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 1cac0f8bc0dc..7622e3dc5d82 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,8 +27,6 @@
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include "lightnvm.h"
-
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
@@ -88,8 +86,7 @@ void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
- dma_addr_t dma_handler)
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
@@ -178,38 +175,133 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
+static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_rq *rqd)
{
- return dev->mt->get_blk(dev, lun, flags);
+ struct nvm_dev *dev = tgt_dev->parent;
+ int i;
+
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
+ rqd->ppa_list[i], TRANS_TGT_TO_DEV);
+ rqd->ppa_list[i] = generic_to_dev_addr(dev,
+ rqd->ppa_list[i]);
+ }
+ } else {
+ rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
+ TRANS_TGT_TO_DEV);
+ rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+ }
}
-EXPORT_SYMBOL(nvm_get_blk);
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int type)
{
- return dev->mt->put_blk(dev, blk);
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all sysblocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_put_blk);
+EXPORT_SYMBOL(nvm_set_bb_tbl);
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
{
- return dev->mt->mark_blk(dev, ppa, type);
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all blocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
+
+int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->ops->max_phys_sect;
}
-EXPORT_SYMBOL(nvm_mark_blk);
+EXPORT_SYMBOL(nvm_max_phys_sects);
-int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- return dev->mt->submit_io(dev, rqd);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
- return dev->mt->erase_blk(dev, blk, 0);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);
+int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
+ nvm_l2p_update_fn *update_l2p, void *priv)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ if (!dev->ops->get_l2p_tbl)
+ return 0;
+
+ return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
+}
+EXPORT_SYMBOL(nvm_get_l2p_tbl);
+
+int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->get_area(dev, lba, len);
+}
+EXPORT_SYMBOL(nvm_get_area);
+
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ dev->mt->put_area(dev, lba);
+}
+EXPORT_SYMBOL(nvm_put_area);
+
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
@@ -241,10 +333,11 @@ EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
+ struct nvm_geo *geo = &dev->geo;
int i, plane_cnt, pl_idx;
struct ppa_addr ppa;
- if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
@@ -262,7 +355,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++)
rqd->ppa_list[i] = ppas[i];
} else {
- plane_cnt = dev->plane_mode;
+ plane_cnt = geo->plane_mode;
rqd->nr_ppas *= plane_cnt;
for (i = 0; i < nr_ppas; i++) {
@@ -287,7 +380,8 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int flags)
{
struct nvm_rq rqd;
int ret;
@@ -303,6 +397,8 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
nvm_generic_to_addr_mode(dev, &rqd);
+ rqd.flags = flags;
+
ret = dev->ops->erase_block(dev, &rqd);
nvm_free_rqd_ppalist(dev, &rqd);
@@ -341,7 +437,7 @@ static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = NULL;
rqd->opcode = opcode;
rqd->flags = flags;
rqd->bio = bio;
@@ -437,17 +533,18 @@ EXPORT_SYMBOL(nvm_submit_ppa);
*/
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
+ struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ if (nr_blks != geo->blks_per_lun * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < dev->blks_per_lun; blk++) {
- offset = blk * dev->plane_mode;
+ for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ offset = blk * geo->plane_mode;
blktype = blks[offset];
/* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < dev->plane_mode; pl++) {
+ for (pl = 0; pl < geo->plane_mode; pl++) {
if (blks[offset + pl] &
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
blktype = blks[offset + pl];
@@ -458,7 +555,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return dev->blks_per_lun;
+ return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -470,11 +567,22 @@ int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
}
EXPORT_SYMBOL(nvm_get_bb_tbl);
+int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+ u8 *blks)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
+ return nvm_get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
+ struct nvm_geo *geo = &dev->geo;
int i;
- dev->lps_per_blk = dev->pgs_per_blk;
+ dev->lps_per_blk = geo->pgs_per_blk;
dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
if (!dev->lptbl)
return -ENOMEM;
@@ -520,29 +628,32 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ struct nvm_geo *geo = &dev->geo;
int ret;
- /* device values */
- dev->nr_chnls = grp->num_ch;
- dev->luns_per_chnl = grp->num_lun;
- dev->pgs_per_blk = grp->num_pg;
- dev->blks_per_lun = grp->num_blk;
- dev->nr_planes = grp->num_pln;
- dev->fpg_size = grp->fpg_sz;
- dev->pfpg_size = grp->fpg_sz * grp->num_pln;
- dev->sec_size = grp->csecs;
- dev->oob_size = grp->sos;
- dev->sec_per_pg = grp->fpg_sz / grp->csecs;
- dev->mccap = grp->mccap;
- memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- dev->plane_mode = NVM_PLANE_SINGLE;
- dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
+ /* Whole device values */
+ geo->nr_chnls = grp->num_ch;
+ geo->luns_per_chnl = grp->num_lun;
+
+ /* Generic device values */
+ geo->pgs_per_blk = grp->num_pg;
+ geo->blks_per_lun = grp->num_blk;
+ geo->nr_planes = grp->num_pln;
+ geo->fpg_size = grp->fpg_sz;
+ geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->sec_size = grp->csecs;
+ geo->oob_size = grp->sos;
+ geo->sec_per_pg = grp->fpg_sz / grp->csecs;
+ geo->mccap = grp->mccap;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ geo->plane_mode = NVM_PLANE_SINGLE;
+ geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
if (grp->mpos & 0x020202)
- dev->plane_mode = NVM_PLANE_DOUBLE;
+ geo->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
- dev->plane_mode = NVM_PLANE_QUAD;
+ geo->plane_mode = NVM_PLANE_QUAD;
if (grp->mtype != 0) {
pr_err("nvm: memory type not supported\n");
@@ -550,13 +661,13 @@ static int nvm_core_init(struct nvm_dev *dev)
}
/* calculated values */
- dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
- dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
- dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
- dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
+ geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
+ geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
+ geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
+ geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = dev->nr_luns * dev->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ dev->total_secs = geo->nr_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
@@ -583,7 +694,7 @@ static int nvm_core_init(struct nvm_dev *dev)
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
- blk_queue_logical_block_size(dev->q, dev->sec_size);
+ blk_queue_logical_block_size(dev->q, geo->sec_size);
return 0;
err_fmtype:
@@ -617,6 +728,7 @@ void nvm_free(struct nvm_dev *dev)
static int nvm_init(struct nvm_dev *dev)
{
+ struct nvm_geo *geo = &dev->geo;
int ret = -EINVAL;
if (!dev->q || !dev->ops)
@@ -648,20 +760,15 @@ static int nvm_init(struct nvm_dev *dev)
}
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
- dev->name, dev->sec_per_pg, dev->nr_planes,
- dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
- dev->nr_chnls);
+ dev->name, geo->sec_per_pg, geo->nr_planes,
+ geo->pgs_per_blk, geo->blks_per_lun,
+ geo->nr_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
return ret;
}
-static void nvm_exit(struct nvm_dev *dev)
-{
- nvm_sysfs_unregister_dev(dev);
-}
-
struct nvm_dev *nvm_alloc_dev(int node)
{
return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
@@ -691,10 +798,6 @@ int nvm_register(struct nvm_dev *dev)
}
}
- ret = nvm_sysfs_register_dev(dev);
- if (ret)
- goto err_ppalist;
-
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
ret = nvm_get_sysblock(dev, &dev->sb);
if (!ret)
@@ -711,8 +814,6 @@ int nvm_register(struct nvm_dev *dev)
up_write(&nvm_lock);
return 0;
-err_ppalist:
- dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
kfree(dev->lun_map);
return ret;
@@ -725,7 +826,7 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_exit(dev);
+ nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -754,149 +855,15 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
}
s = &create->conf.s;
- if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
+ if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->nr_luns);
+ s->lun_begin, s->lun_end, dev->geo.nr_luns);
return -EINVAL;
}
return dev->mt->create_tgt(dev, create);
}
-#ifdef CONFIG_NVM_DEBUG
-static int nvm_configure_show(const char *val)
-{
- struct nvm_dev *dev;
- char opcode, devname[DISK_NAME_LEN];
- int ret;
-
- ret = sscanf(val, "%c %32s", &opcode, devname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
- return -EINVAL;
- }
-
- down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(devname);
- up_write(&nvm_lock);
- if (!dev) {
- pr_err("nvm: device not found\n");
- return -EINVAL;
- }
-
- if (!dev->mt)
- return 0;
-
- dev->mt->lun_info_print(dev);
-
- return 0;
-}
-
-static int nvm_configure_remove(const char *val)
-{
- struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- char opcode;
- int ret = 0;
-
- ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"d targetname\".\n");
- return -EINVAL;
- }
-
- remove.flags = 0;
-
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = dev->mt->remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
-}
-
-static int nvm_configure_create(const char *val)
-{
- struct nvm_ioctl_create create;
- char opcode;
- int lun_begin, lun_end, ret;
-
- ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
- create.tgtname, create.tgttype,
- &lun_begin, &lun_end);
- if (ret != 6) {
- pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
- return -EINVAL;
- }
-
- create.flags = 0;
- create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
- create.conf.s.lun_begin = lun_begin;
- create.conf.s.lun_end = lun_end;
-
- return __nvm_configure_create(&create);
-}
-
-
-/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
-static int nvm_configure_by_str_event(const char *val,
- const struct kernel_param *kp)
-{
- char opcode;
- int ret;
-
- ret = sscanf(val, "%c", &opcode);
- if (ret != 1) {
- pr_err("nvm: string must have the format of \"cmd ...\"\n");
- return -EINVAL;
- }
-
- switch (opcode) {
- case 'a':
- return nvm_configure_create(val);
- case 'd':
- return nvm_configure_remove(val);
- case 's':
- return nvm_configure_show(val);
- default:
- pr_err("nvm: invalid command\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int nvm_configure_get(char *buf, const struct kernel_param *kp)
-{
- int sz;
- struct nvm_dev *dev;
-
- sz = sprintf(buf, "available devices:\n");
- down_write(&nvm_lock);
- list_for_each_entry(dev, &nvm_devices, devices) {
- if (sz > 4095 - DISK_NAME_LEN - 2)
- break;
- sz += sprintf(buf + sz, " %32s\n", dev->name);
- }
- up_write(&nvm_lock);
-
- return sz;
-}
-
-static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
- .set = nvm_configure_by_str_event,
- .get = nvm_configure_get,
-};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "lnvm."
-
-module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
- 0644);
-
-#endif /* CONFIG_NVM_DEBUG */
-
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
struct nvm_ioctl_info *info;
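
With the core.c changes above, a target no longer sees struct nvm_dev directly: it is handed a struct nvm_tgt_dev, geometry is read from the embedded struct nvm_geo, and the new exported helpers (nvm_set_tgt_bb_tbl(), nvm_erase_blk(), nvm_max_phys_sects(), nvm_get_l2p_tbl(), nvm_get_area()/nvm_put_area()) take target-relative addresses and translate them to device addresses via the media manager before reaching the driver. A minimal sketch of how a target could retire a block with these helpers, mirroring the rrpc usage later in this patch; the function is illustrative and the two calls are shown together only for brevity:

static int example_retire_block(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa)
{
	int ret;

	/* mark the block grown-bad in the device bad-block table; the core
	 * translates the target-relative ppa before calling set_bb_tbl */
	ret = nvm_set_tgt_bb_tbl(tgt_dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret)
		return ret;

	/* erase goes through the media manager the same way */
	return nvm_erase_blk(tgt_dev, &ppa, 0);
}
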
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c6d021..ca7880082d80 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,165 @@ static const struct block_device_operations gen_fops = {
.owner = THIS_MODULE,
};
+static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
+ int lun_begin, int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++) {
+ if (test_and_set_bit(i, dev->lun_map)) {
+ pr_err("nvm: lun %d already allocated\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= lun_begin)
+ clear_bit(i, dev->lun_map);
+
+ return -EBUSY;
+}
+
+static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
+ int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++)
+ WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ int i, j;
+
+ for (i = 0; i < dev_map->nr_chnls; i++) {
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs = ch_map->lun_offs;
+ int ch = i + ch_map->ch_off;
+
+ for (j = 0; j < ch_map->nr_luns; j++) {
+ int lun = j + lun_offs[j];
+ int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ }
+
+ kfree(ch_map->lun_offs);
+ }
+
+ kfree(dev_map->chnls);
+ kfree(dev_map);
+ kfree(tgt_dev->luns);
+ kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
+ int lun_begin, int lun_end)
+{
+ struct nvm_tgt_dev *tgt_dev = NULL;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_dev_map *dev_map;
+ struct ppa_addr *luns;
+ int nr_luns = lun_end - lun_begin + 1;
+ int luns_left = nr_luns;
+ int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+ int bch = lun_begin / dev->geo.luns_per_chnl;
+ int blun = lun_begin % dev->geo.luns_per_chnl;
+ int lunid = 0;
+ int lun_balanced = 1;
+ int prev_nr_luns;
+ int i, j;
+
+ nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+ dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_map)
+ goto err_dev;
+
+ dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_map->chnls)
+ goto err_chnls;
+
+ luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!luns)
+ goto err_luns;
+
+ prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+ for (i = 0; i < nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+ int *lun_roffs = ch_rmap->lun_offs;
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs;
+ int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+
+ if (lun_balanced && prev_nr_luns != luns_in_chnl)
+ lun_balanced = 0;
+
+ ch_map->ch_off = ch_rmap->ch_off = bch;
+ ch_map->nr_luns = luns_in_chnl;
+
+ lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_offs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++) {
+ luns[lunid].ppa = 0;
+ luns[lunid].g.ch = i;
+ luns[lunid++].g.lun = j;
+
+ lun_offs[j] = blun;
+ lun_roffs[j + blun] = blun;
+ }
+
+ ch_map->lun_offs = lun_offs;
+
+ /* when starting a new channel, lun offset is reset */
+ blun = 0;
+ luns_left -= luns_in_chnl;
+ }
+
+ dev_map->nr_chnls = nr_chnls;
+
+ tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+ if (!tgt_dev)
+ goto err_ch;
+
+ memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+ /* Target device only owns a portion of the physical device */
+ tgt_dev->geo.nr_chnls = nr_chnls;
+ tgt_dev->geo.nr_luns = nr_luns;
+ tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+ tgt_dev->q = dev->q;
+ tgt_dev->map = dev_map;
+ tgt_dev->luns = luns;
+ memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+ tgt_dev->parent = dev;
+
+ return tgt_dev;
+err_ch:
+ while (--i >= 0)
+ kfree(dev_map->chnls[i].lun_offs);
+ kfree(luns);
+err_luns:
+ kfree(dev_map->chnls);
+err_chnls:
+ kfree(dev_map);
+err_dev:
+ return tgt_dev;
+}
+
static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct gen_dev *gn = dev->mp;
@@ -43,6 +202,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
struct nvm_target *t;
+ struct nvm_tgt_dev *tgt_dev;
void *targetdata;
tt = nvm_find_target_type(create->tgttype, 1);
@@ -64,9 +224,18 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
if (!t)
return -ENOMEM;
+ if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
+ goto err_t;
+
+ tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ if (!tgt_dev) {
+ pr_err("nvm: could not create target device\n");
+ goto err_reserve;
+ }
+
tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
if (!tqueue)
- goto err_t;
+ goto err_dev;
blk_queue_make_request(tqueue, tt->make_rq);
tdisk = alloc_disk(0);
@@ -80,7 +249,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->fops = &gen_fops;
tdisk->queue = tqueue;
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ targetdata = tt->init(tgt_dev, tdisk);
if (IS_ERR(targetdata))
goto err_init;
@@ -94,7 +263,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
t->type = tt;
t->disk = tdisk;
- t->dev = dev;
+ t->dev = tgt_dev;
mutex_lock(&gn->lock);
list_add_tail(&t->list, &gn->targets);
@@ -105,6 +274,10 @@ err_init:
put_disk(tdisk);
err_queue:
blk_cleanup_queue(tqueue);
+err_dev:
+ kfree(tgt_dev);
+err_reserve:
+ gen_release_luns_err(dev, s->lun_begin, s->lun_end);
err_t:
kfree(t);
return -ENOMEM;
@@ -122,6 +295,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
+ gen_remove_tgt_dev(t->dev);
put_disk(tdisk);
list_del(&t->list);
@@ -160,10 +334,11 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
+ struct nvm_geo *geo = &dev->geo;
struct gen_dev *gn = dev->mp;
struct gen_area *area, *prev, *next;
sector_t begin = 0;
- sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+ sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
if (len > max_sectors)
return -EINVAL;
@@ -220,240 +395,74 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
spin_unlock(&dev->lock);
}
-static void gen_blocks_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- int i;
-
- gen_for_each_lun(gn, lun, i) {
- if (!lun->vlun.blocks)
- break;
- vfree(lun->vlun.blocks);
- }
-}
-
-static void gen_luns_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
-
- kfree(gn->luns);
-}
-
-static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- int i;
-
- gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
- if (!gn->luns)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, i) {
- spin_lock_init(&lun->vlun.lock);
- INIT_LIST_HEAD(&lun->free_list);
- INIT_LIST_HEAD(&lun->used_list);
- INIT_LIST_HEAD(&lun->bb_list);
-
- lun->reserved_blocks = 2; /* for GC only */
- lun->vlun.id = i;
- lun->vlun.lun_id = i % dev->luns_per_chnl;
- lun->vlun.chnl_id = i / dev->luns_per_chnl;
- lun->vlun.nr_free_blocks = dev->blks_per_lun;
- }
- return 0;
-}
-
-static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
- u8 *blks, int nr_blks)
-{
- struct nvm_dev *dev = gn->dev;
- struct gen_lun *lun;
- struct nvm_block *blk;
- int i;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == 0)
- continue;
-
- blk = &lun->vlun.blocks[i];
- list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_free_blocks--;
- }
-
- return 0;
-}
-
-static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct nvm_dev *dev = private;
- struct gen_dev *gn = dev->mp;
- u64 elba = slba + nlb;
- struct gen_lun *lun;
- struct nvm_block *blk;
- u64 i;
- int lun_id;
-
- if (unlikely(elba > dev->total_secs)) {
- pr_err("gen: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
- for (i = 0; i < nlb; i++) {
- u64 pba = le64_to_cpu(entries[i]);
-
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("gen: L2P data entry is out of bounds!\n");
- return -EINVAL;
- }
-
- /* Address zero is a special one. The first page on a disk is
- * protected. It often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- /* resolve block from physical address */
- lun_id = div_u64(pba, dev->sec_per_lun);
- lun = &gn->luns[lun_id];
-
- /* Calculate block offset into lun */
- pba = pba - (dev->sec_per_lun * lun_id);
- blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
-
- if (!blk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-etablish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
- }
- }
-
- return 0;
-}
-
-static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- struct nvm_block *block;
- sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret, nr_blks;
- u8 *blks;
-
- nr_blks = dev->blks_per_lun * dev->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, lun_iter) {
- lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
- dev->blks_per_lun);
- if (!lun->vlun.blocks) {
- kfree(blks);
- return -ENOMEM;
- }
-
- for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
- block = &lun->vlun.blocks[blk_iter];
-
- INIT_LIST_HEAD(&block->list);
-
- block->lun = &lun->vlun;
- block->id = cur_block_id++;
-
- /* First block is reserved for device */
- if (unlikely(lun_iter == 0 && blk_iter == 0)) {
- lun->vlun.nr_free_blocks--;
- continue;
- }
-
- list_add_tail(&block->list, &lun->free_list);
- }
-
- if (dev->ops->get_bb_tbl) {
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.ch = lun->vlun.chnl_id;
- ppa.g.lun = lun->vlun.lun_id;
-
- ret = nvm_get_bb_tbl(dev, ppa, blks);
- if (ret)
- pr_err("gen: could not get BB table\n");
-
- ret = gen_block_bb(gn, ppa, blks, nr_blks);
- if (ret)
- pr_err("gen: BB table map failed\n");
- }
- }
-
- if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
- gen_block_map, dev);
- if (ret) {
- pr_err("gen: could not read L2P table.\n");
- pr_warn("gen: default block initialization");
- }
- }
-
- kfree(blks);
- return 0;
-}
-
static void gen_free(struct nvm_dev *dev)
{
- gen_blocks_free(dev);
- gen_luns_free(dev);
kfree(dev->mp);
+ kfree(dev->rmap);
dev->mp = NULL;
}
static int gen_register(struct nvm_dev *dev)
{
struct gen_dev *gn;
- int ret;
+ struct gen_dev_map *dev_rmap;
+ int i, j;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
if (!gn)
- return -ENOMEM;
+ goto err_gn;
+
+ dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_rmap)
+ goto err_rmap;
+
+ dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_rmap->chnls)
+ goto err_chnls;
+
+ for (i = 0; i < dev->geo.nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ int luns_in_chnl = dev->geo.luns_per_chnl;
+
+ ch_rmap = &dev_rmap->chnls[i];
+
+ ch_rmap->ch_off = -1;
+ ch_rmap->nr_luns = luns_in_chnl;
+
+ lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_roffs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++)
+ lun_roffs[j] = -1;
+
+ ch_rmap->lun_offs = lun_roffs;
+ }
gn->dev = dev;
- gn->nr_luns = dev->nr_luns;
+ gn->nr_luns = dev->geo.nr_luns;
INIT_LIST_HEAD(&gn->area_list);
mutex_init(&gn->lock);
INIT_LIST_HEAD(&gn->targets);
dev->mp = gn;
-
- ret = gen_luns_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize luns\n");
- goto err;
- }
-
- ret = gen_blocks_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize blocks\n");
- goto err;
- }
+ dev->rmap = dev_rmap;
return 1;
-err:
+err_ch:
+ while (--i >= 0)
+ kfree(dev_rmap->chnls[i].lun_offs);
+err_chnls:
+ kfree(dev_rmap);
+err_rmap:
gen_free(dev);
+err_gn:
module_put(THIS_MODULE);
- return ret;
+ return -ENOMEM;
}
static void gen_unregister(struct nvm_dev *dev)
@@ -463,7 +472,7 @@ static void gen_unregister(struct nvm_dev *dev)
mutex_lock(&gn->lock);
list_for_each_entry_safe(t, tmp, &gn->targets, list) {
- if (t->dev != dev)
+ if (t->dev->parent != dev)
continue;
__gen_remove_target(t);
}
@@ -473,168 +482,142 @@ static void gen_unregister(struct nvm_dev *dev)
module_put(THIS_MODULE);
}
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
+static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- struct nvm_block *blk = NULL;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&vlun->lock);
- if (list_empty(&lun->free_list)) {
- pr_err_ratelimited("gen: lun %u have no free pages available",
- lun->vlun.id);
- goto out;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+ int lun_off = ch_map->lun_offs[p->g.lun];
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap;
+ int lun_roff;
+
+ p->g.ch += ch_map->ch_off;
+ p->g.lun += lun_off;
+
+ ch_rmap = &dev_rmap->chnls[p->g.ch];
+ lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+ if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
+ pr_err("nvm: corrupted device partition table\n");
+ return -EINVAL;
}
- if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
- goto out;
-
- blk = list_first_entry(&lun->free_list, struct nvm_block, list);
-
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
-out:
- spin_unlock(&vlun->lock);
- return blk;
-}
-
-static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
-
- spin_lock(&vlun->lock);
- if (blk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_free_blocks++;
- blk->state = NVM_BLK_ST_FREE;
- } else if (blk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&blk->list, &lun->bb_list);
- blk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("gen: erroneous block type (%lu -> %u)\n",
- blk->id, blk->state);
- list_move_tail(&blk->list, &lun->bb_list);
- }
- spin_unlock(&vlun->lock);
+ return 0;
}
-static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- struct nvm_block *blk;
-
- pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
- ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
-
- if (unlikely(ppa.g.ch > dev->nr_chnls ||
- ppa.g.lun > dev->luns_per_chnl ||
- ppa.g.blk > dev->blks_per_lun)) {
- WARN_ON_ONCE(1);
- pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
- ppa.g.ch, dev->nr_chnls,
- ppa.g.lun, dev->luns_per_chnl,
- ppa.g.blk, dev->blks_per_lun);
- return;
- }
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+ int lun_roff = ch_rmap->lun_offs[p->g.lun];
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- blk = &lun->vlun.blocks[ppa.g.blk];
+ p->g.ch -= ch_rmap->ch_off;
+ p->g.lun -= lun_roff;
- /* will be moved to bb list on put_blk from target */
- blk->state = type;
+ return 0;
}
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ int flag)
{
- int bit = -1;
- int max_secs = dev->ops->max_phys_sect;
- void *comp_bits = &rqd->ppa_status;
+ gen_trans_fn *f;
+ int i;
+ int ret = 0;
- nvm_addr_to_generic_mode(dev, rqd);
+ f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
- /* look up blocks and mark them as bad */
- if (rqd->nr_ppas == 1) {
- gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
- return;
+ if (rqd->nr_ppas == 1)
+ return f(tgt_dev, &rqd->ppa_addr);
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ ret = f(tgt_dev, &rqd->ppa_list[i]);
+ if (ret)
+ goto out;
}
- while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
- gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+out:
+ return ret;
}
static void gen_end_io(struct nvm_rq *rqd)
{
+ struct nvm_tgt_dev *tgt_dev = rqd->dev;
struct nvm_tgt_instance *ins = rqd->ins;
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- gen_mark_blk_bad(rqd->dev, rqd);
+ /* Convert address space */
+ if (tgt_dev)
+ gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
ins->tt->end_io(rqd);
}
-static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
+ struct nvm_dev *dev = tgt_dev->parent;
+
if (!dev->ops->submit_io)
return -ENODEV;
/* Convert address space */
+ gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = tgt_dev;
rqd->end_io = gen_end_io;
return dev->ops->submit_io(dev, rqd);
}
-static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
- unsigned long flags)
+static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
+ int flags)
{
- struct ppa_addr addr = block_to_ppa(dev, blk);
+ /* Convert address space */
+ gen_map_to_dev(tgt_dev, p);
- return nvm_erase_ppa(dev, &addr, 1);
+ return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
}
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
+static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr p, int direction)
{
- return test_and_set_bit(lunid, dev->lun_map);
-}
+ gen_trans_fn *f;
+ struct ppa_addr ppa = p;
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
- WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
+ f(tgt_dev, &ppa);
+
+ return ppa;
}
-static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
+static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+ int len)
{
- struct gen_dev *gn = dev->mp;
-
- if (unlikely(lunid >= dev->nr_luns))
- return NULL;
+ struct nvm_geo *geo = &dev->geo;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ u64 i;
- return &gn->luns[lunid].vlun;
-}
+ for (i = 0; i < len; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ struct ppa_addr gaddr;
+ u64 pba = le64_to_cpu(entries[i]);
+ int off;
+ u64 diff;
-static void gen_lun_info_print(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- unsigned int i;
+ if (!pba)
+ continue;
+ gaddr = linear_to_generic_addr(geo, pba);
+ ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+ lun_roffs = ch_rmap->lun_offs;
- gen_for_each_lun(gn, lun, i) {
- spin_lock(&lun->vlun.lock);
+ off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
- pr_info("%s: lun%8u\t%u\n", dev->name, i,
- lun->vlun.nr_free_blocks);
+ diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+ (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
- spin_unlock(&lun->vlun.lock);
+ entries[i] -= cpu_to_le64(diff);
}
}
@@ -648,22 +631,14 @@ static struct nvmm_type gen = {
.create_tgt = gen_create_tgt,
.remove_tgt = gen_remove_tgt,
- .get_blk = gen_get_blk,
- .put_blk = gen_put_blk,
-
.submit_io = gen_submit_io,
.erase_blk = gen_erase_blk,
- .mark_blk = gen_mark_blk,
-
- .get_lun = gen_get_lun,
- .reserve_lun = gen_reserve_lun,
- .release_lun = gen_release_lun,
- .lun_info_print = gen_lun_info_print,
-
.get_area = gen_get_area,
.put_area = gen_put_area,
+ .trans_ppa = gen_trans_ppa,
+ .part_to_tgt = gen_part_to_tgt,
};
static int __init gen_module_init(void)
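
gen_create_tgt_dev() above carves a contiguous range of device LUNs into per-channel maps: bch/blun locate the first device channel and the LUN offset inside it, and each gen_ch_map then records its channel offset and per-LUN offsets. A standalone sketch of that arithmetic (userspace, illustrative geometry only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Illustrative geometry: 4 LUNs per channel, target owns device
	 * LUNs 4..11, i.e. all of device channels 1 and 2. */
	int luns_per_chnl = 4;
	int lun_begin = 4, lun_end = 11;

	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int bch = lun_begin / luns_per_chnl;	/* first device channel used */
	int blun = lun_begin % luns_per_chnl;	/* LUN offset in that channel */
	int nr_chnls = nr_luns / luns_per_chnl + !!(nr_luns % luns_per_chnl);
	int i;

	for (i = 0; i < nr_chnls; i++) {
		int luns_in_chnl = luns_left > luns_per_chnl ?
					luns_per_chnl : luns_left;

		printf("target ch %d -> device ch %d, %d LUNs, lun offset %d\n",
		       i, i + bch, luns_in_chnl, blun);

		blun = 0;	/* the offset only applies to the first channel */
		luns_left -= luns_in_chnl;
	}
	return 0;
}
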
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa817d21d..6a4b3f368848 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -20,37 +20,41 @@
#include <linux/lightnvm.h>
-struct gen_lun {
- struct nvm_lun vlun;
-
- int reserved_blocks;
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Not used blocks i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
-};
-
struct gen_dev {
struct nvm_dev *dev;
int nr_luns;
- struct gen_lun *luns;
struct list_head area_list;
struct mutex lock;
struct list_head targets;
};
+/* Map between virtual and physical channel and lun */
+struct gen_ch_map {
+ int ch_off;
+ int nr_luns;
+ int *lun_offs;
+};
+
+struct gen_dev_map {
+ struct gen_ch_map *chnls;
+ int nr_chnls;
+};
+
struct gen_area {
struct list_head list;
sector_t begin;
sector_t end; /* end is excluded */
};
+static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
+{
+ return ch_map + 1;
+}
+
+typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
+
#define gen_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
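
The new gen_ch_map/gen_dev_map structures above describe the forward map (target channel/LUN plus offsets gives the device channel/LUN), with dev->rmap holding the reverse offsets. ch_map_to_lun_offs() simply returns the memory immediately after a struct gen_ch_map, which suggests the LUN offset array can live in the same allocation as the map when it is packed for transfer; the helper below is an assumed usage sketch of that layout, not code from this patch:

static struct gen_ch_map *example_pack_ch_map(struct gen_ch_map *src)
{
	struct gen_ch_map *dst;
	int *dst_offs;

	/* single allocation: the struct followed by its lun offset array */
	dst = kmalloc(sizeof(*dst) + src->nr_luns * sizeof(int), GFP_KERNEL);
	if (!dst)
		return NULL;

	*dst = *src;
	dst_offs = ch_map_to_lun_offs(dst);	/* == (void *)(dst + 1) */
	memcpy(dst_offs, src->lun_offs, src->nr_luns * sizeof(int));
	dst->lun_offs = dst_offs;

	return dst;
}
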
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
deleted file mode 100644
index 305c181509a6..000000000000
--- a/drivers/lightnvm/lightnvm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2016 CNEX Labs. All rights reserved.
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- */
-
-#ifndef LIGHTNVM_H
-#define LIGHTNVM_H
-
-#include <linux/lightnvm.h>
-
-/* core -> sysfs.c */
-int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
-void nvm_sysfs_unregister_dev(struct nvm_dev *);
-int nvm_sysfs_register(void);
-void nvm_sysfs_unregister(void);
-
-/* sysfs > core */
-void nvm_free(struct nvm_dev *);
-
-#endif
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaadbf80c..9fb7de395915 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -28,6 +28,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk = a->rblk;
unsigned int pg_offset;
@@ -38,13 +39,13 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
+ div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -116,62 +117,35 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->sec_per_blk);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
+ return (rblk->next_page == dev->geo.sec_per_blk);
}
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_block *blk = rblk->parent;
- int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
-
- return lun_blk * rrpc->dev->sec_per_blk;
-}
-
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_block *blk = rblk->parent;
-
- return blk->id * rrpc->dev->sec_per_blk;
-}
-
-static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
- return l;
+ return rlun->id * dev->geo.sec_per_blk;
}
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+ struct rrpc_addr *gp)
{
+ struct rrpc_block *rblk = gp->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 addr = gp->addr;
struct ppa_addr paddr;
paddr.ppa = addr;
- return linear_to_generic_addr(dev, paddr);
+ paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+ paddr.g.ch = rlun->bppa.g.ch;
+ paddr.g.lun = rlun->bppa.g.lun;
+ paddr.g.blk = rblk->id;
+
+ return paddr;
}
/* requires lun->lock taken */
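
rrpc_ppa_to_gaddr() above now builds the device-visible address from the owning LUN's base address (rlun->bppa) and the block id, so only the sector and page have to be decoded from the block-local address. Assuming rrpc_linear_to_generic_addr() (defined in rrpc.h, not shown in this excerpt) performs the same sec/pg split as the removed linear_to_generic_addr(), the decoding reduces to two divisions; a standalone sketch with illustrative geometry:

#include <stdio.h>

int main(void)
{
	/* Illustrative geometry: 4 sectors per page, 256 pages per block. */
	unsigned long sec_per_pg = 4, pgs_per_blk = 256;
	unsigned long addr = 517;	/* block-local sector number */

	unsigned long sec = addr % sec_per_pg;
	unsigned long pg = (addr / sec_per_pg) % pgs_per_blk;

	printf("sec %lu, pg %lu; ch/lun/blk come from rlun->bppa and rblk->id\n",
	       sec, pg);
	return 0;
}
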
@@ -188,21 +162,47 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
*cur_rblk = new_rblk;
}
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
+ struct rrpc_lun *rlun)
+{
+ struct rrpc_block *rblk = NULL;
+
+ if (list_empty(&rlun->free_list))
+ goto out;
+
+ rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
+
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+
+out:
+ return rblk;
+}
+
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_block *blk;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk;
+ int is_gc = flags & NVM_IOTYPE_GC;
+
+ spin_lock(&rlun->lock);
+ if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
+ pr_err("nvm: rrpc: cannot give block to non GC request\n");
+ spin_unlock(&rlun->lock);
+ return NULL;
+ }
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk) {
- pr_err("nvm: rrpc: cannot get new block from media manager\n");
+ rblk = __rrpc_get_blk(rrpc, rlun);
+ if (!rblk) {
+ pr_err("nvm: rrpc: cannot get new block\n");
+ spin_unlock(&rlun->lock);
return NULL;
}
+ spin_unlock(&rlun->lock);
- rblk = rrpc_get_rblk(rlun, blk->id);
- blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
+ bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -212,7 +212,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- nvm_put_blk(rrpc->dev, rblk->parent);
+ struct rrpc_lun *rlun = rblk->rlun;
+
+ spin_lock(&rlun->lock);
+ if (rblk->state & NVM_BLK_ST_TGT) {
+ list_move_tail(&rblk->list, &rlun->free_list);
+ rlun->nr_free_blocks++;
+ rblk->state = NVM_BLK_ST_FREE;
+ } else if (rblk->state & NVM_BLK_ST_BAD) {
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ } else {
+ WARN_ON_ONCE(1);
+ pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id, rblk->state);
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ }
+ spin_unlock(&rlun->lock);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -280,13 +297,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
*/
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct request_queue *q = rrpc->dev->q;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct request_queue *q = dev->q;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
- int nr_sec_per_blk = rrpc->dev->sec_per_blk;
+ int nr_sec_per_blk = dev->geo.sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
@@ -309,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+ phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
try:
spin_lock(&rrpc->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[phys_addr];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
spin_unlock(&rrpc->rev_lock);
@@ -396,15 +414,23 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct ppa_addr ppa;
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
if (rrpc_move_valid_pages(rrpc, rblk))
goto put_back;
- if (nvm_erase_blk(dev, rblk->parent))
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+ ppa.g.blk = rblk->id;
+
+ if (nvm_erase_blk(dev, &ppa, 0))
goto put_back;
rrpc_put_blk(rrpc, rblk);
@@ -420,7 +446,7 @@ put_back:
/* the block with highest number of invalid pages, will be in the beginning
* of the list
*/
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
struct rrpc_block *rb)
{
if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -435,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblock, *max;
+ struct rrpc_block *rblk, *max;
BUG_ON(list_empty(prio_list));
max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblock, prio_list, prio)
- max = rblock_max_invalid(max, rblock);
+ list_for_each_entry(rblk, prio_list, prio)
+ max = rblk_max_invalid(max, rblk);
return max;
}
@@ -450,36 +476,37 @@ static void rrpc_lun_gc(struct work_struct *work)
{
struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
struct rrpc *rrpc = rlun->rrpc;
- struct nvm_lun *lun = rlun->parent;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block_gc *gcb;
unsigned int nr_blocks_need;
- nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+ nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
spin_lock(&rlun->lock);
- while (nr_blocks_need > lun->nr_free_blocks &&
+ while (nr_blocks_need > rlun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblock = block_prio_find_max(rlun);
- struct nvm_block *block = rblock->parent;
+ struct rrpc_block *rblk = block_prio_find_max(rlun);
- if (!rblock->nr_invalid_pages)
+ if (!rblk->nr_invalid_pages)
break;
gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
if (!gcb)
break;
- list_del_init(&rblock->prio);
+ list_del_init(&rblk->prio);
- BUG_ON(!block_is_full(rrpc, rblock));
+ WARN_ON(!block_is_full(rrpc, rblk));
- pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+ pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
gcb->rrpc = rrpc;
- gcb->rblk = rblock;
+ gcb->rblk = rblk;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -504,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
spin_unlock(&rlun->lock);
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
}
static const struct block_device_operations rrpc_fops = {
@@ -529,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
* estimate.
*/
rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->parent->nr_free_blocks >
- max_free->parent->nr_free_blocks)
+ if (rlun->nr_free_blocks > max_free->nr_free_blocks)
max_free = rlun;
}
@@ -553,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[gp->addr];
rev->addr = laddr;
spin_unlock(&rrpc->rev_lock);
@@ -568,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
if (block_is_full(rrpc, rblk))
goto out;
- addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+ addr = rblk->next_page;
rblk->next_page++;
out:
@@ -582,20 +609,22 @@ out:
* Returns rrpc_addr with the physical address and block. Returns NULL if no
* blocks in the next rlun are available.
*/
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
+ struct nvm_tgt_dev *tgt_dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk, **cur_rblk;
- struct nvm_lun *lun;
+ struct rrpc_addr *p;
+ struct ppa_addr ppa;
u64 paddr;
int gc_force = 0;
+ ppa.ppa = ADDR_EMPTY;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
- lun = rlun->parent;
- if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
- return NULL;
+ if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+ return ppa;
/*
* page allocation steps:
@@ -652,10 +681,15 @@ new_blk:
}
pr_err("rrpc: failed to allocate new block\n");
- return NULL;
+ return ppa;
done:
spin_unlock(&rlun->lock);
- return rrpc_update_map(rrpc, laddr, rblk, paddr);
+ p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+ if (!p)
+ return ppa;
+
+ /* return global address */
+ return rrpc_ppa_to_gaddr(tgt_dev, p);
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -675,21 +709,70 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
+{
+ struct rrpc_lun *rlun = NULL;
+ int i;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+ rrpc->luns[i].bppa.g.lun == p.g.lun) {
+ rlun = &rrpc->luns[i];
+ break;
+ }
+ }
+
+ return rlun;
+}
+
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
+
+ rlun = rrpc_ppa_to_lun(rrpc, ppa);
+ rblk = &rlun->blocks[ppa.g.blk];
+ rblk->state = NVM_BLK_ST_BAD;
+
+ nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+ void *comp_bits = &rqd->ppa_status;
+ struct ppa_addr ppa, prev_ppa;
+ int nr_ppas = rqd->nr_ppas;
+ int bit;
+
+ if (rqd->nr_ppas == 1)
+ __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
+
+ ppa_set_empty(&prev_ppa);
+ bit = -1;
+ while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+ ppa = rqd->ppa_list[bit];
+ if (ppa_cmp_blk(ppa, prev_ppa))
+ continue;
+
+ __rrpc_mark_bad_block(rrpc, ppa);
+ }
+}
+
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
sector_t laddr, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *p;
struct rrpc_block *rblk;
- struct nvm_lun *lun;
int cmnt_size, i;
for (i = 0; i < npages; i++) {
p = &rrpc->trans_map[laddr + i];
rblk = p->rblk;
- lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
+ if (unlikely(cmnt_size == dev->geo.sec_per_blk))
rrpc_run_gc(rrpc, rblk);
}
}
@@ -697,12 +780,17 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
- if (bio_data_dir(rqd->bio) == WRITE)
+ if (bio_data_dir(rqd->bio) == WRITE) {
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+ rrpc_mark_bad_block(rrpc, rqd);
+
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ }
bio_put(rqd->bio);
@@ -712,7 +800,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
rrpc_unlock_rq(rrpc, rqd);
if (npages > 1)
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -720,6 +808,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
struct rrpc_addr *gp;
sector_t laddr = rrpc_get_laddr(bio);
@@ -727,7 +816,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
@@ -737,12 +826,11 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- gp->addr);
+ rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
return NVM_IO_DONE;
}
@@ -756,7 +844,6 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
struct rrpc_addr *gp;
@@ -768,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+ rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
@@ -776,7 +863,6 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
}
rqd->opcode = NVM_OP_HBREAD;
- rrqd->addr = gp;
return NVM_IO_OK;
}
@@ -784,31 +870,31 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
sector_t laddr = rrpc_get_laddr(bio);
int is_gc = flags & NVM_IOTYPE_GC;
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- p->addr);
+ rqd->ppa_list[i] = p;
}
rqd->opcode = NVM_OP_HBWRITE;
@@ -819,8 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
@@ -828,16 +913,15 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_REQUEUE;
p = rrpc_map_page(rrpc, laddr, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+ rqd->ppa_addr = p;
rqd->opcode = NVM_OP_HBWRITE;
- rrqd->addr = p;
return NVM_IO_OK;
}
@@ -845,8 +929,10 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+ rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("rrpc: not able to allocate ppa list\n");
@@ -869,14 +955,15 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- int err;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
uint8_t nr_pages = rrpc_get_pages(bio);
int bio_size = bio_sectors(bio) << 9;
+ int err;
- if (bio_size < rrpc->dev->sec_size)
+ if (bio_size < dev->geo.sec_size)
return NVM_IO_ERR;
- else if (bio_size > rrpc->dev->max_rq_size)
+ else if (bio_size > dev->geo.max_rq_size)
return NVM_IO_ERR;
err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
@@ -889,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
rqd->nr_ppas = nr_pages;
rrq->flags = flags;
- err = nvm_submit_io(rrpc->dev, rqd);
+ err = nvm_submit_io(dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(rrpc->dev,
- rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+ rqd->dma_ppa_list);
}
return NVM_IO_ERR;
}
@@ -911,6 +998,8 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
@@ -997,25 +1086,24 @@ static void rrpc_map_free(struct rrpc *rrpc)
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- u64 elba = slba + nlb;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
u64 i;
- if (unlikely(elba > dev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
for (i = 0; i < nlb; i++) {
+ struct ppa_addr gaddr;
u64 pba = le64_to_cpu(entries[i]);
unsigned int mod;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
+ pr_err("nvm: Maybe loaded an old target L2P\n");
return -EINVAL;
}
@@ -1028,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
div_u64_rem(pba, rrpc->nr_sects, &mod);
+ gaddr = rrpc_recov_addr(dev, pba);
+ rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+ if (!rlun) {
+ pr_err("rrpc: l2p corruption on lba %llu\n",
+ slba + i);
+ return -EINVAL;
+ }
+
+ rblk = &rlun->blocks[gaddr.g.blk];
+ if (!rblk->state) {
+ /* at this point, we don't know anything about the
+ * block. It's up to the FTL on top to re-establish the
+ * block state. The block is assumed to be open.
+ */
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+ }
+
addr[i].addr = pba;
+ addr[i].rblk = rblk;
raddr[mod].addr = slba + i;
}
@@ -1037,7 +1145,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
static int rrpc_map_init(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t i;
int ret;
@@ -1058,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
r->addr = ADDR_EMPTY;
}
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
+ ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1102,7 +1207,7 @@ static int rrpc_core_init(struct rrpc *rrpc)
if (!rrpc->page_pool)
return -ENOMEM;
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+ rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
rrpc_gcb_cache);
if (!rrpc->gcb_pool)
return -ENOMEM;
@@ -1126,8 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun;
struct rrpc_lun *rlun;
int i;
@@ -1136,23 +1239,74 @@ static void rrpc_luns_free(struct rrpc *rrpc)
for (i = 0; i < rrpc->nr_luns; i++) {
rlun = &rrpc->luns[i];
- lun = rlun->parent;
- if (!lun)
- break;
- dev->mt->release_lun(dev, lun->id);
vfree(rlun->blocks);
}
kfree(rrpc->luns);
}
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_block *rblk;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int nr_blks;
+ int i;
+ int ret;
+
+ if (!dev->parent->ops->get_bb_tbl)
+ return 0;
+
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+
+ ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+ if (ret) {
+ pr_err("rrpc: could not get BB table\n");
+ goto out;
+ }
+
+ nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_FREE)
+ continue;
+
+ rblk = &rlun->blocks[i];
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ rlun->nr_free_blocks--;
+ }
+
+out:
+ kfree(blks);
+ return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+ rlun->bppa.ppa = 0;
+ rlun->bppa.g.ch = ppa.g.ch;
+ rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc_lun *rlun;
int i, j, ret = -EINVAL;
- if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1166,43 +1320,46 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- int lunid = lun_begin + i;
- struct nvm_lun *lun;
-
- if (dev->mt->reserve_lun(dev, lunid)) {
- pr_err("rrpc: lun %u is already allocated\n", lunid);
- goto err;
- }
-
- lun = dev->mt->get_lun(dev, lunid);
- if (!lun)
- goto err;
-
rlun = &rrpc->luns[i];
- rlun->parent = lun;
+ rlun->id = i;
+ rrpc_set_lun_ppa(rlun, luns[i]);
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
+ geo->blks_per_lun);
if (!rlun->blocks) {
ret = -ENOMEM;
goto err;
}
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
+ INIT_LIST_HEAD(&rlun->free_list);
+ INIT_LIST_HEAD(&rlun->used_list);
+ INIT_LIST_HEAD(&rlun->bb_list);
+
+ for (j = 0; j < geo->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
- rblk->parent = blk;
+ rblk->id = j;
rblk->rlun = rlun;
+ rblk->state = NVM_BLK_T_FREE;
INIT_LIST_HEAD(&rblk->prio);
+ INIT_LIST_HEAD(&rblk->list);
spin_lock_init(&rblk->lock);
+
+ list_add_tail(&rblk->list, &rlun->free_list);
}
rlun->rrpc = rrpc;
+ rlun->nr_free_blocks = geo->blks_per_lun;
+ rlun->reserved_blocks = 2; /* for GC only */
+
INIT_LIST_HEAD(&rlun->prio_list);
INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
+
+ if (rrpc_bb_discovery(dev, rlun))
+ goto err;
+
}
return 0;
@@ -1213,27 +1370,25 @@ err:
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t size = rrpc->nr_sects * dev->sec_size;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t size = rrpc->nr_sects * dev->geo.sec_size;
int ret;
size >>= 9;
- ret = mt->get_area(dev, begin, size);
+ ret = nvm_get_area(dev, begin, size);
if (!ret)
- *begin >>= (ilog2(dev->sec_size) - 9);
+ *begin >>= (ilog2(dev->geo.sec_size) - 9);
return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
- mt->put_area(dev, begin);
+ nvm_put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1262,11 +1417,11 @@ static void rrpc_exit(void *private)
static sector_t rrpc_capacity(void *private)
{
struct rrpc *rrpc = private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1285,13 +1440,13 @@ static sector_t rrpc_capacity(void *private)
*/
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
u64 bpaddr, paddr, pladdr;
bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
@@ -1311,6 +1466,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
static int rrpc_blocks_init(struct rrpc *rrpc)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk;
int lun_iter, blk_iter;
@@ -1318,7 +1474,7 @@ static int rrpc_blocks_init(struct rrpc *rrpc)
for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
rlun = &rrpc->luns[lun_iter];
- for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+ for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
blk_iter++) {
rblk = &rlun->blocks[blk_iter];
rrpc_block_map_update(rrpc, rblk);
@@ -1357,11 +1513,11 @@ err:
static struct nvm_tgt_type tt_rrpc;
-static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
{
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc *rrpc;
sector_t soffset;
int ret;
@@ -1384,9 +1540,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
spin_lock_init(&rrpc->bio_lock);
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
- rrpc->nr_luns = lun_end - lun_begin + 1;
- rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
- rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+ rrpc->nr_luns = geo->nr_luns;
+ rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1398,15 +1553,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
}
rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ ret = rrpc_luns_init(rrpc, dev->luns);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 5e87d52cb983..94e4d73116b2 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -48,14 +48,15 @@ struct rrpc_inflight_rq {
struct rrpc_rq {
struct rrpc_inflight_rq inflight_rq;
- struct rrpc_addr *addr;
unsigned long flags;
};
struct rrpc_block {
- struct nvm_block *parent;
+ int id; /* id inside of LUN */
struct rrpc_lun *rlun;
- struct list_head prio;
+
+ struct list_head prio; /* LUN GC list */
+ struct list_head list; /* LUN free, used, bb list */
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
@@ -65,21 +66,38 @@ struct rrpc_block {
/* number of pages that are invalid, wrt host page size */
unsigned int nr_invalid_pages;
+ int state;
+
spinlock_t lock;
atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
struct rrpc_lun {
struct rrpc *rrpc;
- struct nvm_lun *parent;
+
+ int id;
+ struct ppa_addr bppa;
+
struct rrpc_block *cur, *gc_cur;
struct rrpc_block *blocks; /* Reference to block allocation */
struct list_head prio_list; /* Blocks that may be GC'ed */
struct list_head wblk_list; /* Queued blocks to be written to */
+ /* lun block lists */
+ struct list_head used_list; /* In-use blocks */
+ struct list_head free_list; /* Not used blocks i.e. released
+ * and ready for use
+ */
+ struct list_head bb_list; /* Bad blocks. Mutually exclusive with
+ * free_list and used_list
+ */
+ unsigned int nr_free_blocks; /* Number of unused blocks */
+
struct work_struct ws_gc;
+ int reserved_blocks;
+
spinlock_t lock;
};
@@ -87,19 +105,16 @@ struct rrpc {
/* instance must be kept at the top to resolve rrpc in unprep */
struct nvm_tgt_instance instance;
- struct nvm_dev *dev;
+ struct nvm_tgt_dev *dev;
struct gendisk *disk;
sector_t soffset; /* logical sector offset */
- u64 poffset; /* physical page offset */
- int lun_offset;
int nr_luns;
struct rrpc_lun *luns;
/* calculated values */
unsigned long long nr_sects;
- unsigned long total_blocks;
/* Write strategy variables. Move these into a separate structure for each
* strategy
@@ -150,13 +165,37 @@ struct rrpc_rev_addr {
u64 addr;
};
-static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
- int blk_id)
+static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
+ struct ppa_addr r)
+{
+ struct ppa_addr l;
+ int secs, pgs;
+ sector_t ppa = r.ppa;
+
+ l.ppa = 0;
+
+ div_u64_rem(ppa, geo->sec_per_pg, &secs);
+ l.g.sec = secs;
+
+ sector_div(ppa, geo->sec_per_pg);
+ div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+ l.g.pg = pgs;
+
+ return l;
+}
+
+static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
+{
+ return linear_to_generic_addr(&dev->geo, pba);
+}
+
+static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc *rrpc = rlun->rrpc;
- int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_lun *rlun = rblk->rlun;
- return &rlun->blocks[lun_blk];
+ return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
static inline sector_t rrpc_get_laddr(struct bio *bio)
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a75bd28aaca3..12002bf4efc2 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -62,7 +62,8 @@ static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
- int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+ struct nvm_geo *geo = &dev->geo;
+ int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
int i;
for (i = 0; i < nr_rows; i++)
@@ -71,7 +72,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
/* if possible, place sysblk at first channel, middle channel and last
* channel of the device. If not, create only one or two sys blocks
*/
- switch (dev->nr_chnls) {
+ switch (geo->nr_chnls) {
case 2:
sysblk_ppas[1].g.ch = 1;
/* fall-through */
@@ -80,8 +81,8 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
break;
default:
sysblk_ppas[0].g.ch = 0;
- sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
- sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+ sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
+ sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
break;
}
@@ -162,11 +163,12 @@ static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *ppas, int get_free)
{
+ struct nvm_geo *geo = &dev->geo;
int i, nr_blks, ret = 0;
u8 *blks;
s->nr_ppas = 0;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
@@ -210,13 +212,14 @@ err_get:
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block *cur;
int pg, ret, found = 0;
/* the full buffer for a flash page is allocated. Only the beginning of it
* contains the system block information
*/
- cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
+ cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
@@ -225,7 +228,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, dev->pfpg_size);
+ cur, geo->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -267,34 +270,16 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
return found;
}
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+ int type)
{
- struct nvm_rq rqd;
- int ret;
-
- if (s->nr_ppas > dev->ops->max_phys_sect) {
- pr_err("nvm: unable to update all sysblocks atomically\n");
- return -EINVAL;
- }
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(dev, &rqd);
- if (ret) {
- pr_err("nvm: sysblk failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
+ return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
}
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block nvmsb;
void *buf;
int i, sect, ret = 0;
@@ -302,12 +287,12 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
nvm_cpu_to_sysblk(&nvmsb, info);
- buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
+ buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
- ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+ ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas) {
ret = -ENOMEM;
goto err;
@@ -324,15 +309,15 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
ppas[0].g.pg);
/* Expand to all sectors within a flash page */
- if (dev->sec_per_pg > 1) {
- for (sect = 1; sect < dev->sec_per_pg; sect++) {
+ if (geo->sec_per_pg > 1) {
+ for (sect = 1; sect < geo->sec_per_pg; sect++) {
ppas[sect].ppa = ppas[0].ppa;
ppas[sect].g.sec = sect;
}
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -341,8 +326,8 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
break;
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
@@ -379,7 +364,7 @@ static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
ppa->g.pg = ppa_to_slc(dev, 0);
- ret = nvm_erase_ppa(dev, ppa, 1);
+ ret = nvm_erase_ppa(dev, ppa, 1, 0);
if (ret)
return ret;
@@ -546,6 +531,7 @@ err_sysblk:
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
int ret;
@@ -560,7 +546,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
return -EINVAL;
- if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+ if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
pr_err("nvm: memory does not support SLC access\n");
return -EINVAL;
}
@@ -573,7 +559,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (ret)
goto err_mark;
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
if (ret)
goto err_mark;
@@ -590,11 +576,11 @@ static int factory_nblks(int nblks)
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
-static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
+static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
{
- int nblks = factory_nblks(dev->blks_per_lun);
+ int nblks = factory_nblks(geo->blks_per_lun);
- return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
+ return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}
@@ -608,7 +594,7 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
if (nr_blks < 0)
return nr_blks;
- lunoff = factory_blk_offset(dev, ppa);
+ lunoff = factory_blk_offset(&dev->geo, ppa);
/* non-set bits correspond to blocks that must be erased */
for (i = 0; i < nr_blks; i++) {
@@ -637,19 +623,19 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
int max_ppas, unsigned long *blk_bitmap)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
unsigned long *offset;
while (!done) {
done = 1;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
- idx = factory_blk_offset(dev, ppa);
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
+ idx = factory_blk_offset(geo, ppa);
offset = &blk_bitmap[idx];
- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
+ blkid = find_first_zero_bit(offset, geo->blks_per_lun);
+ if (blkid >= geo->blks_per_lun)
continue;
set_bit(blkid, offset);
@@ -674,16 +660,17 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, nr_blks, ret = 0;
u8 *blks;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
@@ -701,14 +688,15 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
- int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+ int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
unsigned long *blk_bitmap;
- blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
GFP_KERNEL);
if (!blk_bitmap)
return ret;
@@ -725,7 +713,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
/* continue to erase until the list of blks is empty */
while ((ppa_cnt =
nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
- nvm_erase_ppa(dev, ppas, ppa_cnt);
+ nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
@@ -733,7 +721,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
}
err_ppas:
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
deleted file mode 100644
index 0338c27ab95a..000000000000
--- a/drivers/lightnvm/sysfs.c
+++ /dev/null
@@ -1,198 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/lightnvm.h>
-#include <linux/miscdevice.h>
-#include <linux/kobject.h>
-#include <linux/blk-mq.h>
-
-#include "lightnvm.h"
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
- struct nvm_id *id = &ndev->identity;
- struct nvm_id_group *grp = &id->groups[0];
- struct attribute *attr = &dattr->attr;
-
- if (strcmp(attr->name, "version") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
- } else if (strcmp(attr->name, "vendor_opcode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
- } else if (strcmp(attr->name, "capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
- } else if (strcmp(attr->name, "device_mode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
- } else if (strcmp(attr->name, "media_manager") == 0) {
- if (!ndev->mt)
- return scnprintf(page, PAGE_SIZE, "%s\n", "none");
- return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
- } else if (strcmp(attr->name, "ppa_format") == 0) {
- return scnprintf(page, PAGE_SIZE,
- "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- id->ppaf.ch_offset, id->ppaf.ch_len,
- id->ppaf.lun_offset, id->ppaf.lun_len,
- id->ppaf.pln_offset, id->ppaf.pln_len,
- id->ppaf.blk_offset, id->ppaf.blk_len,
- id->ppaf.pg_offset, id->ppaf.pg_len,
- id->ppaf.sect_offset, id->ppaf.sect_len);
- } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
- } else if (strcmp(attr->name, "flash_media_type") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
- } else if (strcmp(attr->name, "num_channels") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
- } else if (strcmp(attr->name, "num_luns") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
- } else if (strcmp(attr->name, "num_planes") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
- } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
- } else if (strcmp(attr->name, "num_pages") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
- } else if (strcmp(attr->name, "page_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
- } else if (strcmp(attr->name, "hw_sector_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
- } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
- } else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
- } else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
- } else if (strcmp(attr->name, "prog_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
- } else if (strcmp(attr->name, "prog_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
- } else if (strcmp(attr->name, "erase_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
- } else if (strcmp(attr->name, "erase_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
- } else if (strcmp(attr->name, "multiplane_modes") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
- } else if (strcmp(attr->name, "media_capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
- } else if (strcmp(attr->name, "max_phys_secs") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n",
- ndev->ops->max_phys_sect);
- } else {
- return scnprintf(page,
- PAGE_SIZE,
- "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
- attr->name);
- }
-}
-
-#define NVM_DEV_ATTR_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(vendor_opcode);
-static NVM_DEV_ATTR_RO(capabilities);
-static NVM_DEV_ATTR_RO(device_mode);
-static NVM_DEV_ATTR_RO(ppa_format);
-static NVM_DEV_ATTR_RO(media_manager);
-
-static NVM_DEV_ATTR_RO(media_type);
-static NVM_DEV_ATTR_RO(flash_media_type);
-static NVM_DEV_ATTR_RO(num_channels);
-static NVM_DEV_ATTR_RO(num_luns);
-static NVM_DEV_ATTR_RO(num_planes);
-static NVM_DEV_ATTR_RO(num_blocks);
-static NVM_DEV_ATTR_RO(num_pages);
-static NVM_DEV_ATTR_RO(page_size);
-static NVM_DEV_ATTR_RO(hw_sector_size);
-static NVM_DEV_ATTR_RO(oob_sector_size);
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-static NVM_DEV_ATTR_RO(prog_typ);
-static NVM_DEV_ATTR_RO(prog_max);
-static NVM_DEV_ATTR_RO(erase_typ);
-static NVM_DEV_ATTR_RO(erase_max);
-static NVM_DEV_ATTR_RO(multiplane_modes);
-static NVM_DEV_ATTR_RO(media_capabilities);
-static NVM_DEV_ATTR_RO(max_phys_secs);
-
-#define NVM_DEV_ATTR(_name) (dev_attr_##_name##)
-
-static struct attribute *nvm_dev_attrs[] = {
- &dev_attr_version.attr,
- &dev_attr_vendor_opcode.attr,
- &dev_attr_capabilities.attr,
- &dev_attr_device_mode.attr,
- &dev_attr_media_manager.attr,
-
- &dev_attr_ppa_format.attr,
- &dev_attr_media_type.attr,
- &dev_attr_flash_media_type.attr,
- &dev_attr_num_channels.attr,
- &dev_attr_num_luns.attr,
- &dev_attr_num_planes.attr,
- &dev_attr_num_blocks.attr,
- &dev_attr_num_pages.attr,
- &dev_attr_page_size.attr,
- &dev_attr_hw_sector_size.attr,
- &dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
- &dev_attr_prog_typ.attr,
- &dev_attr_prog_max.attr,
- &dev_attr_erase_typ.attr,
- &dev_attr_erase_max.attr,
- &dev_attr_multiplane_modes.attr,
- &dev_attr_media_capabilities.attr,
- &dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static struct attribute_group nvm_dev_attr_group = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs,
-};
-
-static const struct attribute_group *nvm_dev_attr_groups[] = {
- &nvm_dev_attr_group,
- NULL,
-};
-
-static void nvm_dev_release(struct device *device)
-{
- struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
- struct request_queue *q = dev->q;
-
- pr_debug("nvm/sysfs: `nvm_dev_release`\n");
-
- blk_mq_unregister_dev(device, q);
-
- nvm_free(dev);
-}
-
-static struct device_type nvm_type = {
- .name = "lightnvm",
- .groups = nvm_dev_attr_groups,
- .release = nvm_dev_release,
-};
-
-int nvm_sysfs_register_dev(struct nvm_dev *dev)
-{
- int ret;
-
- if (!dev->parent_dev)
- return 0;
-
- dev->dev.parent = dev->parent_dev;
- dev_set_name(&dev->dev, "%s", dev->name);
- dev->dev.type = &nvm_type;
- device_initialize(&dev->dev);
- ret = device_add(&dev->dev);
-
- if (!ret)
- blk_mq_register_dev(&dev->dev, dev->q);
-
- return ret;
-}
-
-void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
-{
- if (dev && dev->parent_dev)
- kobject_put(&dev->dev.kobj);
-}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..6fdd8e252760 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, b->keys.set[0].data);
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
- bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
bch_bio_map(b->bio, i);
/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5f6ae6..06f55056aaae 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, sorted);
submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec bv;
- struct bvec_iter iter;
+ struct bio_vec bv, cbv;
+ struct bvec_iter iter, citer = { 0 };
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
- bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+ check->bi_opf = REQ_OP_READ;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
+ citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
- void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+ void *p2;
+
+ cbv = bio_iter_iovec(check, citer);
+ p2 = page_address(cbv.bv_page);
cache_set_err_on(memcmp(p1 + bv.bv_offset,
p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
(uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
+ bio_advance_iter(check, &citer, bv.bv_len);
}
bio_free_pages(check);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e97b0acf7b8d..db45a88c0ce9 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio);
- bio->bi_max_vecs = bucket_pages(c);
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023e12d4..1198e53d5670 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs, 1);
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
- bio->bi_max_vecs = 1;
- bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5c4bddecfaf0..13b8a907006d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
- PAGE_SECTORS);
bio->bi_private = &io->cl;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e424b3..f49c5417527d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
- op_is_write(bio_op(bio)) &&
- (bio->bi_opf & REQ_SYNC))
+ op_is_write(bio->bi_opf) &&
+ op_is_sync(bio->bi_opf))
goto rescale;
spin_lock(&dc->io_lock);
@@ -623,7 +623,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
struct bio *bio = &s->bio.bio;
- bio_init(bio);
+ bio_init(bio, NULL, 0);
__bio_clone_fast(bio, orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
flush->bi_bdev = bio->bi_bdev;
flush->bi_end_io = request_endio;
flush->bi_private = cl;
- bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+ flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
closure_bio_submit(flush, cl);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..2fb5bfeb43e2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+ uuid_io(c, REQ_OP_READ, 0, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+ prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
@@ -1152,9 +1152,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
- bio_init(&dc->sb_bio);
- dc->sb_bio.bi_max_vecs = 1;
- dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+ bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
@@ -1814,9 +1812,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio);
- ca->journal.bio.bi_max_vecs = 8;
- ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+ bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
@@ -1852,9 +1848,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
- bio_init(&ca->sb_bio);
- ca->sb_bio.bi_max_vecs = 1;
- ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+ bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e503a5..69e1ae59cab8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf565167..629bd1a502fd 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_opf & REQ_SYNC ||
- in_use <= CUTOFF_WRITEBACK;
+ return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}
static inline void bch_writeback_queue(struct cached_dev *dc)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc3875f..262e75365cc0 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
char *ptr;
int len;
- bio_init(&b->bio);
- b->bio.bi_io_vec = b->bio_vec;
- b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = inline_endio;
@@ -1316,7 +1314,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..68a9eb4f3f36 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1135,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+ clone->bi_opf = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad42ec5..33e71ea6cc14 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
};
lc->io_req.bi_op = REQ_OP_WRITE;
- lc->io_req.bi_op_flags = WRITE_FLUSH;
+ lc->io_req.bi_op_flags = REQ_PREFLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9a8b71067c6e..2ddc2d20e62d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+ .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..b2a9e2d161e4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -75,12 +75,6 @@ static void dm_old_start_queue(struct request_queue *q)
static void dm_mq_start_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
blk_mq_start_stopped_hw_queues(q, true);
blk_mq_kick_requeue_list(q);
}
@@ -105,20 +99,10 @@ static void dm_old_stop_queue(struct request_queue *q)
static void dm_mq_stop_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (blk_mq_queue_stopped(q))
return;
- }
-
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- /* Avoid that requeuing could restart the queue. */
- blk_mq_cancel_requeue_work(q);
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
}
void dm_stop_queue(struct request_queue *q)
@@ -313,7 +297,7 @@ static void dm_unprep_request(struct request *rq)
if (!rq->q->mq_ops) {
rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ rq->rq_flags &= ~RQF_DONTPREP;
}
if (clone)
@@ -338,12 +322,7 @@ static void dm_old_requeue_request(struct request *rq)
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_mq_delay_kick_requeue_list(q, msecs);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_delay_kick_requeue_list(q, msecs);
}
void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +333,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
- blk_mq_requeue_request(rq);
+ blk_mq_requeue_request(rq, false);
__dm_mq_kick_requeue_list(rq->q, msecs);
}
@@ -431,7 +410,7 @@ static void dm_softirq_done(struct request *rq)
return;
}
- if (rq->cmd_flags & REQ_FAILED)
+ if (rq->rq_flags & RQF_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
@@ -460,7 +439,7 @@ static void dm_complete_request(struct request *rq, int error)
*/
static void dm_kill_unmapped_request(struct request *rq, int error)
{
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
@@ -476,7 +455,7 @@ static void end_clone_request(struct request *clone, int error)
* This is just for cleaning up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* actually freed here because it is allocated
- * from dm own mempool (REQ_ALLOCED isn't set).
+ * from dm own mempool (RQF_ALLOCED isn't set).
*/
__blk_put_request(clone->q, clone);
}
@@ -497,7 +476,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
int r;
if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
+ clone->rq_flags |= RQF_IO_STAT;
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +612,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_DEFER;
rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -904,17 +883,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
dm_put_live_table(md, srcu_idx);
}
- /*
- * On suspend dm_stop_queue() handles stopping the blk-mq
- * request_queue BUT: even though the hw_queues are marked
- * BLK_MQ_S_STOPPED at that point there is still a race that
- * is allowing block/blk-mq.c to call ->queue_rq against a
- * hctx that it really shouldn't. The following check guards
- * against this rarity (albeit _not_ race-free).
- */
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
- return BLK_MQ_RQ_QUEUE_BUSY;
-
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..b93476c3ba3f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
ps->valid = 0;
/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+ r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1dd6900..ffa97b742a68 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad;
- bio_init(&md->flush_bio);
+ bio_init(&md->flush_bio, NULL, 0);
md->flush_bio.bi_bdev = md->bdev;
- bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
dm_stats_init(&md->stats);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2089d46b0eb8..f975cd08923d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
- bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+ bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 673efbd6fc47..4da06d813b8f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio);
+ bio_init(&mp_bh->bio, NULL, 0);
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a227a9f3ee65..8491edcfb5a6 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
mb->checksum = cpu_to_le32(crc);
if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
- WRITE_FUA, false)) {
+ REQ_FUA, false)) {
__free_page(page);
return -EIO;
}
@@ -1205,7 +1205,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio);
+ bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251e91e6..5f9e28443c8a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -2004,13 +2004,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_max_vecs = 1;
-
- bio_init(&dev->rreq);
- dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_max_vecs = 1;
+ bio_init(&dev->req, &dev->vec, 1);
+ bio_init(&dev->rreq, &dev->rvec, 1);
}
}
return sh;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index aacf584f2a42..f3512404bc52 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2006,7 +2006,7 @@ static int msb_prepare_req(struct request_queue *q, struct request *req)
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c1472275fe57..fa0746d182ff 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -834,7 +834,7 @@ static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 646d1a1fa6ca..bab3f07b1117 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1733,7 +1733,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
cmd_abort:
if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
while (ret)
ret = blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));
@@ -1741,7 +1741,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
start_new_req:
if (rqc) {
if (mmc_card_removed(card)) {
- rqc->cmd_flags |= REQ_QUIET;
+ rqc->rq_flags |= RQF_QUIET;
blk_end_request_all(rqc, -EIO);
} else {
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index cf29809f69e4..6ae6bfb8b221 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -44,7 +44,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -133,7 +133,7 @@ static void mmc_request_fn(struct request_queue *q)
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
__blk_end_request_all(req, -EIO);
}
return;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index f7d37a62f874..90745a616df7 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -43,3 +43,20 @@ config NVME_RDMA
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+
+config NVME_FC
+ tristate "NVM Express over Fabrics FC host driver"
+ depends on BLOCK
+ depends on HAS_DMA
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the FC transport. This allows you to use remote block devices
+ exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 47abcec23514..f1a7d945fbb6 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
+obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
@@ -12,3 +13,5 @@ nvme-y += pci.o
nvme-fabrics-y += fabrics.o
nvme-rdma-y += rdma.o
+
+nvme-fc-y += fc.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 79e679d12f3b..35b3fee5a453 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -201,13 +201,7 @@ fail:
void nvme_requeue_req(struct request *req)
{
- unsigned long flags;
-
- blk_mq_requeue_request(req);
- spin_lock_irqsave(req->q->queue_lock, flags);
- if (!blk_queue_stopped(req->q))
- blk_mq_kick_requeue_list(req->q);
- spin_unlock_irqrestore(req->q->queue_lock, flags);
+ blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);
@@ -227,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- req->cmd = (unsigned char *)cmd;
- req->cmd_len = sizeof(struct nvme_command);
+ nvme_req(req)->cmd = cmd;
return req;
}
@@ -246,8 +239,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_dsm_range *range;
- struct page *page;
- int offset;
unsigned int nr_bytes = blk_rq_bytes(req);
range = kmalloc(sizeof(*range), GFP_ATOMIC);
@@ -264,19 +255,27 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
cmnd->dsm.nr = 0;
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- req->completion_data = range;
- page = virt_to_page(range);
- offset = offset_in_page(range);
- blk_add_request_payload(req, page, offset, sizeof(*range));
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range);
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
- /*
- * we set __data_len back to the size of the area to be discarded
- * on disk. This allows us to report completion on the full amount
- * of blocks described by the request.
- */
- req->__data_len = nr_bytes;
+ return BLK_MQ_RQ_QUEUE_OK;
+}
- return 0;
+static inline void nvme_setup_write_zeroes(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd)
+{
+ struct nvme_write_zeroes_cmd *write_zeroes = &cmnd->write_zeroes;
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ write_zeroes->opcode = nvme_cmd_write_zeroes;
+ write_zeroes->nsid = cpu_to_le32(ns->ns_id);
+ write_zeroes->slba =
+ cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ write_zeroes->length =
+ cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ write_zeroes->control = 0;
}
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -295,7 +294,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -324,17 +322,21 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd)
{
- int ret = 0;
+ int ret = BLK_MQ_RQ_QUEUE_OK;
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- memcpy(cmd, req->cmd, sizeof(*cmd));
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
+ else if (req_op(req) == REQ_OP_WRITE_ZEROES)
+ nvme_setup_write_zeroes(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
+ cmd->common.command_id = req->tag;
+
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -344,7 +346,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head, int flags)
{
struct request *req;
@@ -355,7 +357,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- req->special = cqe;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -364,6 +365,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
blk_execute_rq(req->q, NULL, req, at_head);
+ if (result)
+ *result = nvme_req(req)->result;
ret = req->errors;
out:
blk_mq_free_request(req);
@@ -385,7 +388,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
- struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
struct request *req;
@@ -398,7 +400,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- req->special = &cqe;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -453,7 +454,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
blk_execute_rq(req->q, disk, req, 0);
ret = req->errors;
if (result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(nvme_req(req)->result.u32);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -602,7 +603,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
@@ -610,10 +611,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
c.features.nsid = cpu_to_le32(nsid);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(res.u32);
return ret;
}
@@ -621,7 +622,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
@@ -629,10 +630,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(res.u32);
return ret;
}
@@ -951,6 +952,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
+ if (ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)
+ blk_queue_max_write_zeroes_sectors(ns->queue,
+ ((u32)(USHRT_MAX + 1) * bs) >> 9);
+
blk_mq_unfreeze_queue(disk->queue);
}
@@ -1683,27 +1688,24 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_ns(ns, &id))
goto out_free_queue;
- if (nvme_nvm_ns_supported(ns, id)) {
- if (nvme_nvm_register(ns, disk_name, node,
- &nvme_ns_attr_group)) {
- dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
- __func__);
- goto out_free_id;
- }
- } else {
- disk = alloc_disk_node(0, node);
- if (!disk)
- goto out_free_id;
+ if (nvme_nvm_ns_supported(ns, id) &&
+ nvme_nvm_register(ns, disk_name, node)) {
+ dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+ goto out_free_id;
+ }
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
- memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
- ns->disk = disk;
+ disk = alloc_disk_node(0, node);
+ if (!disk)
+ goto out_free_id;
- __nvme_revalidate_disk(disk, id);
- }
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+ ns->disk = disk;
+
+ __nvme_revalidate_disk(disk, id);
mutex_lock(&ctrl->namespaces_mutex);
list_add_tail(&ns->list, &ctrl->namespaces);
@@ -1713,14 +1715,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
kfree(id);
- if (ns->ndev)
- return;
-
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
+ if (ns->ndev && nvme_nvm_register_sysfs(ns))
+ pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
+ ns->disk->disk_name);
return;
out_free_id:
kfree(id);
@@ -1742,6 +1744,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group);
+ if (ns->ndev)
+ nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
@@ -1905,18 +1909,25 @@ static void nvme_async_event_work(struct work_struct *work)
spin_unlock_irq(&ctrl->lock);
}
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
- struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ union nvme_result *res)
{
- u16 status = le16_to_cpu(cqe->status) >> 1;
- u32 result = le32_to_cpu(cqe->result);
+ u32 result = le32_to_cpu(res->u32);
+ bool done = true;
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+ switch (le16_to_cpu(status) >> 1) {
+ case NVME_SC_SUCCESS:
+ done = false;
+ /*FALLTHRU*/
+ case NVME_SC_ABORT_REQ:
++ctrl->event_limit;
schedule_work(&ctrl->async_event_work);
+ break;
+ default:
+ break;
}
- if (status != NVME_SC_SUCCESS)
+ if (done)
return;
switch (result & 0xff07) {
@@ -2078,14 +2089,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
struct nvme_ns *ns;
mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- spin_lock_irq(ns->queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
- spin_unlock_irq(ns->queue->queue_lock);
-
- blk_mq_cancel_requeue_work(ns->queue);
- blk_mq_stop_hw_queues(ns->queue);
- }
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_mq_quiesce_queue(ns->queue);
mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -2096,7 +2101,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a3f008d3480..916d13608059 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
- *val = le64_to_cpu(cqe.result64);
+ *val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
- *val = le64_to_cpu(cqe.result64);
+ *val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
struct nvmf_connect_data *data;
int ret;
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
- nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(cqe.result16);
+ ctrl->cntlid = le16_to_cpu(res.u16);
out_free_data:
kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
struct nvme_command cmd;
struct nvmf_connect_data *data;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), 0, qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
- nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
kfree(data);
@@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
- opts->subsysnqn, NVMF_NQN_SIZE);
+ opts->subsysnqn, NVMF_NQN_SIZE);
ret = -EINVAL;
goto out;
}
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
p, NVMF_NQN_SIZE);
+ kfree(p);
ret = -EINVAL;
goto out;
}
opts->host = nvmf_host_add(p);
+ kfree(p);
if (!opts->host) {
ret = -ENOMEM;
goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
out_unlock:
mutex_unlock(&nvmf_transports_mutex);
out_free_opts:
- nvmf_host_put(opts->host);
- kfree(opts);
+ nvmf_free_options(opts);
return ERR_PTR(ret);
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
new file mode 100644
index 000000000000..771e2e761872
--- /dev/null
+++ b/drivers/nvme/host/fc.c
@@ -0,0 +1,2586 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_FC_NR_AEN_COMMANDS 1
+#define NVME_FC_AQ_BLKMQ_DEPTH \
+ (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
+
+enum nvme_fc_queue_flags {
+ NVME_FC_Q_CONNECTED = (1 << 0),
+};
+
+#define NVMEFC_QUEUE_DELAY 3 /* ms units */
+
+struct nvme_fc_queue {
+ struct nvme_fc_ctrl *ctrl;
+ struct device *dev;
+ struct blk_mq_hw_ctx *hctx;
+ void *lldd_handle;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ u32 qnum;
+ u32 rqcnt;
+ u32 seqno;
+
+ u64 connection_id;
+ atomic_t csn;
+
+ unsigned long flags;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+struct nvmefc_ls_req_op {
+ struct nvmefc_ls_req ls_req;
+
+ struct nvme_fc_ctrl *ctrl;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+
+ int ls_error;
+ struct completion ls_done;
+ struct list_head lsreq_list; /* ctrl->ls_req_list */
+ bool req_queued;
+};
+
+enum nvme_fcpop_state {
+ FCPOP_STATE_UNINIT = 0,
+ FCPOP_STATE_IDLE = 1,
+ FCPOP_STATE_ACTIVE = 2,
+ FCPOP_STATE_ABORTED = 3,
+};
+
+struct nvme_fc_fcp_op {
+ struct nvme_request nreq; /*
+ * nvme/host/core.c
+ * requires this to be
+ * the 1st element in the
+ * private structure
+ * associated with the
+ * request.
+ */
+ struct nvmefc_fcp_req fcp_req;
+
+ struct nvme_fc_ctrl *ctrl;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+
+ atomic_t state;
+ u32 rqno;
+ u32 nents;
+
+ struct nvme_fc_cmd_iu cmd_iu;
+ struct nvme_fc_ersp_iu rsp_iu;
+};
+
+struct nvme_fc_lport {
+ struct nvme_fc_local_port localport;
+
+ struct ida endp_cnt;
+ struct list_head port_list; /* nvme_fc_port_list */
+ struct list_head endp_list;
+ struct device *dev; /* physical device for dma */
+ struct nvme_fc_port_template *ops;
+ struct kref ref;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+struct nvme_fc_rport {
+ struct nvme_fc_remote_port remoteport;
+
+ struct list_head endp_list; /* for lport->endp_list */
+ struct list_head ctrl_list;
+ spinlock_t lock;
+ struct kref ref;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+enum nvme_fcctrl_state {
+ FCCTRL_INIT = 0,
+ FCCTRL_ACTIVE = 1,
+};
+
+struct nvme_fc_ctrl {
+ spinlock_t lock;
+ struct nvme_fc_queue *queues;
+ u32 queue_count;
+
+ struct device *dev;
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ u32 cnum;
+
+ u64 association_id;
+
+ u64 cap;
+
+ struct list_head ctrl_list; /* rport->ctrl_list */
+ struct list_head ls_req_list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct blk_mq_tag_set tag_set;
+
+ struct work_struct delete_work;
+ struct kref ref;
+ int state;
+
+ struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_fc_ctrl *
+to_fc_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
+}
+
+static inline struct nvme_fc_lport *
+localport_to_lport(struct nvme_fc_local_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_lport, localport);
+}
+
+static inline struct nvme_fc_rport *
+remoteport_to_rport(struct nvme_fc_remote_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_rport, remoteport);
+}
+
+static inline struct nvmefc_ls_req_op *
+ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
+{
+ return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
+}
+
+static inline struct nvme_fc_fcp_op *
+fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
+{
+ return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
+}
+
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvme_fc_lock);
+
+static LIST_HEAD(nvme_fc_lport_list);
+static DEFINE_IDA(nvme_fc_local_port_cnt);
+static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+static struct workqueue_struct *nvme_fc_wq;
+
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
+static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
+ struct nvme_fc_queue *, unsigned int);
+
+
+/**
+ * nvme_fc_register_localport - transport entry point called by an
+ * LLDD to register the existence of an NVME
+ * host FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a local port pointer. Upon success, the routine
+ * will allocate a nvme_fc_local_port structure and place its
+ * address in the local port pointer. Upon failure, local port
+ * pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *template,
+ struct device *dev,
+ struct nvme_fc_local_port **portptr)
+{
+ struct nvme_fc_lport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->localport_delete || !template->remoteport_delete ||
+ !template->ls_req || !template->fcp_io ||
+ !template->ls_abort || !template->fcp_abort ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_reghost_failed;
+ }
+
+ newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_reghost_failed;
+ }
+
+ idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (!get_device(dev) && dev) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ INIT_LIST_HEAD(&newrec->port_list);
+ INIT_LIST_HEAD(&newrec->endp_list);
+ kref_init(&newrec->ref);
+ newrec->ops = template;
+ newrec->dev = dev;
+ ida_init(&newrec->endp_cnt);
+ newrec->localport.private = &newrec[1];
+ newrec->localport.node_name = pinfo->node_name;
+ newrec->localport.port_name = pinfo->port_name;
+ newrec->localport.port_role = pinfo->port_role;
+ newrec->localport.port_id = pinfo->port_id;
+ newrec->localport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->localport.port_num = idx;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ if (dev)
+ dma_set_seg_boundary(dev, template->dma_boundary);
+
+ *portptr = &newrec->localport;
+ return 0;
+
+out_ida_put:
+ ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_reghost_failed:
+ *portptr = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
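As a rough illustration of how a low-level driver might call this entry point, here is a minimal sketch; example_lldd_nvme_probe, example_localport and the WWNN/WWPN/port_id values are hypothetical and not part of this patch, and the port template is assumed to be filled in elsewhere by the LLDD.

#include <linux/nvme-fc-driver.h>

static struct nvme_fc_local_port *example_localport;

/*
 * Hypothetical LLDD probe path. The template must provide localport_delete,
 * remoteport_delete, ls_req, fcp_io, ls_abort, fcp_abort, max_hw_queues,
 * max_sgl_segments, max_dif_sgl_segments and dma_boundary, otherwise the
 * registration above fails with -EINVAL.
 */
static int example_lldd_nvme_probe(struct device *dev,
				   struct nvme_fc_port_template *tmpl)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = 0x20000090fa000001ULL,	/* WWNN, example value */
		.port_name = 0x10000090fa000001ULL,	/* WWPN, example value */
		.port_id   = 0x010200,			/* FC address, example */
		/* .port_role would carry the NVMe initiator role bit */
	};

	return nvme_fc_register_localport(&pinfo, tmpl, dev,
					  &example_localport);
}

The matching teardown is nvme_fc_unregister_localport(example_localport), which marks the port FC_OBJSTATE_DELETED and drops the reference taken at registration.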
+
+static void
+nvme_fc_free_lport(struct kref *ref)
+{
+ struct nvme_fc_lport *lport =
+ container_of(ref, struct nvme_fc_lport, ref);
+ unsigned long flags;
+
+ WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&lport->endp_list));
+
+ /* remove from transport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&lport->port_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ /* let the LLDD know we've finished tearing it down */
+ lport->ops->localport_delete(&lport->localport);
+
+ ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_destroy(&lport->endp_cnt);
+
+ put_device(lport->dev);
+
+ kfree(lport);
+}
+
+static void
+nvme_fc_lport_put(struct nvme_fc_lport *lport)
+{
+ kref_put(&lport->ref, nvme_fc_free_lport);
+}
+
+static int
+nvme_fc_lport_get(struct nvme_fc_lport *lport)
+{
+ return kref_get_unless_zero(&lport->ref);
+}
+
+/**
+ * nvme_fc_unregister_localport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME host FC port.
+ * @localport: pointer to the (registered) local port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(portptr);
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ nvme_fc_lport_put(lport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+
+/**
+ * nvme_fc_register_remoteport - transport entry point called by an
+ * LLDD to register the existence of an NVME
+ * subsystem FC port on its fabric.
+ * @localport: pointer to the (registered) local port that the remote
+ * subsystem port is connected to.
+ * @pinfo: pointer to information about the port to be registered
+ * @portptr: pointer to a remote port pointer. Upon success, the routine
+ * will allocate a nvme_fc_remote_port structure and place its
+ * address in the remote port pointer. Upon failure, remote port
+ * pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+ struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_remote_port **portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(localport);
+ struct nvme_fc_rport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_reghost_failed;
+ }
+
+ if (!nvme_fc_lport_get(lport)) {
+ ret = -ESHUTDOWN;
+ goto out_kfree_rport;
+ }
+
+ idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_lport_put;
+ }
+
+ INIT_LIST_HEAD(&newrec->endp_list);
+ INIT_LIST_HEAD(&newrec->ctrl_list);
+ kref_init(&newrec->ref);
+ spin_lock_init(&newrec->lock);
+ newrec->remoteport.localport = &lport->localport;
+ newrec->remoteport.private = &newrec[1];
+ newrec->remoteport.port_role = pinfo->port_role;
+ newrec->remoteport.node_name = pinfo->node_name;
+ newrec->remoteport.port_name = pinfo->port_name;
+ newrec->remoteport.port_id = pinfo->port_id;
+ newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->remoteport.port_num = idx;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->endp_list, &lport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ *portptr = &newrec->remoteport;
+ return 0;
+
+out_lport_put:
+ nvme_fc_lport_put(lport);
+out_kfree_rport:
+ kfree(newrec);
+out_reghost_failed:
+ *portptr = NULL;
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
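In the same spirit, a hedged sketch of the call an LLDD might make when fabric discovery finds an NVMe-subsystem-capable target port; example_lldd_new_target and its arguments are hypothetical, and localport is assumed to be the handle returned by nvme_fc_register_localport().

static int example_lldd_new_target(struct nvme_fc_local_port *localport,
				   u64 wwnn, u64 wwpn, u32 fc_id)
{
	struct nvme_fc_remote_port *remoteport;
	struct nvme_fc_port_info rport_info = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_id   = fc_id,
		/* .port_role reflects what the discovered port advertised */
	};

	/*
	 * Takes a reference on the local port; fails with -ESHUTDOWN if the
	 * local port is already being torn down.
	 */
	return nvme_fc_register_remoteport(localport, &rport_info,
					   &remoteport);
}

When the target later drops off the fabric, the LLDD calls nvme_fc_unregister_remoteport(), which marks the remote port deleted and tears down every controller association hanging off it.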
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+ struct nvme_fc_rport *rport =
+ container_of(ref, struct nvme_fc_rport, ref);
+ struct nvme_fc_lport *lport =
+ localport_to_lport(rport->remoteport.localport);
+ unsigned long flags;
+
+ WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&rport->ctrl_list));
+
+ /* remove from lport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&rport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ /* let the LLDD know we've finished tearing it down */
+ lport->ops->remoteport_delete(&rport->remoteport);
+
+ ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+
+ kfree(rport);
+
+ nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+ kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+ return kref_get_unless_zero(&rport->ref);
+}
+
+/**
+ * nvme_fc_unregister_remoteport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME subsystem FC port.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ /* tear down all associations to the remote port */
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+ __nvme_fc_del_ctrl(ctrl);
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ nvme_fc_rport_put(rport);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
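To make the behaviour described in the comment above concrete, a tiny usage sketch (example_map_cmd is illustrative only): with a real device the wrappers fall through to the normal DMA API, while with the fcloop pseudo device (dev == NULL) they degrade to no-ops that hand back a zero dma address.

static void example_map_cmd(struct device *dev, void *cmd_iu, size_t len)
{
	/* dev == NULL (fcloop): returns (dma_addr_t)0 without touching the DMA API */
	dma_addr_t dma = fc_dma_map_single(dev, cmd_iu, len, DMA_TO_DEVICE);

	/* dev == NULL: never reports a mapping error */
	if (fc_dma_mapping_error(dev, dma))
		return;

	/* ... hand dma to the LLDD ... */

	/* dev == NULL: unmap is a no-op */
	fc_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}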
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
+static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+
+
+static void
+__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop)
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvme_fc_ctrl_put(ctrl);
+}
+
+static int
+__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret;
+
+ if (!nvme_fc_ctrl_get(ctrl))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->ctrl = ctrl;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+ init_completion(&lsop->ls_done);
+
+ lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
+ nvme_fc_ctrl_put(ctrl);
+ dev_err(ctrl->dev,
+ "els request command failed EFAULT.\n");
+ return -EFAULT;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
+ &ctrl->rport->remoteport, lsreq);
+ if (ret)
+ lsop->ls_error = ret;
+
+ return ret;
+}
+
+static void
+nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+
+ lsop->ls_error = status;
+ complete(&lsop->ls_done);
+}
+
+static int
+nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
+ int ret;
+
+ ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
+
+ if (!ret)
+ /*
+ * No timeout/not interruptible as we need the struct
+ * to exist until the lldd calls us back. Thus we mandate
+ * waiting until the lldd calls back; the lldd is responsible
+ * for the timeout action.
+ */
+ wait_for_completion(&lsop->ls_done);
+
+ __nvme_fc_finish_ls_req(ctrl, lsop);
+
+ if (ret) {
+ dev_err(ctrl->dev,
+ "ls request command failed (%d).\n", ret);
+ return ret;
+ }
+
+ /* ACC or RJT payload ? */
+ if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
+ return -ENXIO;
+
+ return 0;
+}
+
+static void
+nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ int ret;
+
+ ret = __nvme_fc_send_ls_req(ctrl, lsop, done);
+
+ /* don't wait for completion */
+
+ if (ret)
+ done(&lsop->ls_req, ret);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_LSACC = 1,
+ VERR_LSDESC_RQST = 2,
+ VERR_LSDESC_RQST_LEN = 3,
+ VERR_ASSOC_ID = 4,
+ VERR_ASSOC_ID_LEN = 5,
+ VERR_CONN_ID = 6,
+ VERR_CONN_ID_LEN = 7,
+ VERR_CR_ASSOC = 8,
+ VERR_CR_ASSOC_ACC_LEN = 9,
+ VERR_CR_CONN = 10,
+ VERR_CR_CONN_ACC_LEN = 11,
+ VERR_DISCONN = 12,
+ VERR_DISCONN_ACC_LEN = 13,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+static int
+nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
+ struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+ if (!lsop) {
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+
+ assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ assoc_rqst->desc_list_len =
+ cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ assoc_rqst->assoc_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+ /* Linux supports only Dynamic controllers */
+ assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
+ memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
+ min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+ strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
+ strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = assoc_rqst;
+ lsreq->rqstlen = sizeof(*assoc_rqst);
+ lsreq->rspaddr = assoc_acc;
+ lsreq->rsplen = sizeof(*assoc_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ if (assoc_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)))
+ fcret = VERR_CR_ASSOC_ACC_LEN;
+ if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (assoc_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
+ fcret = VERR_CR_ASSOC;
+ else if (assoc_acc->associd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ fcret = VERR_ASSOC_ID;
+ else if (assoc_acc->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ fcret = VERR_ASSOC_ID_LEN;
+ else if (assoc_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (assoc_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d connect failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ ctrl->association_id =
+ be64_to_cpu(assoc_acc->associd.association_id);
+ queue->connection_id =
+ be64_to_cpu(assoc_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect admin queue failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static int
+nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_conn_rqst *conn_rqst;
+ struct fcnvme_ls_cr_conn_acc *conn_acc;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+ if (!lsop) {
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+
+ conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ conn_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+
+ conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ conn_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+ conn_rqst->connect_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
+ conn_rqst->connect_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+ conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
+ conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = conn_rqst;
+ lsreq->rqstlen = sizeof(*conn_rqst);
+ lsreq->rspaddr = conn_acc;
+ lsreq->rsplen = sizeof(*conn_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ if (conn_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
+ fcret = VERR_CR_CONN_ACC_LEN;
+ if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (conn_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
+ fcret = VERR_CR_CONN;
+ else if (conn_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (conn_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d connect failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ queue->connection_id =
+ be64_to_cpu(conn_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect command failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static void
+nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+ struct nvme_fc_ctrl *ctrl = lsop->ctrl;
+
+ __nvme_fc_finish_ls_req(ctrl, lsop);
+
+ if (status)
+ dev_err(ctrl->dev,
+ "disconnect assoc ls request command failed (%d).\n",
+ status);
+
+ /* fc-nvme initiator doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme initiator is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme target, so you may never get a
+ * response even if you tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme target
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+{
+ struct fcnvme_ls_disconnect_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*discon_rqst) + sizeof(*discon_acc)),
+ GFP_KERNEL);
+ if (!lsop)
+ /* couldn't send it... too bad */
+ return;
+
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
+ discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
+
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
+
+ /* only meaningful part to terminating the association */
+ ctrl->association_id = 0;
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static int
+nvme_fc_reinit_request(void *data, struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+
+ memset(cmdiu, 0, sizeof(*cmdiu));
+ cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->fc_id = NVME_CMD_FC_ID;
+ cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
+
+ return 0;
+}
+
+static void
+__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op)
+{
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_UNINIT);
+}
+
+static void
+nvme_fc_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+
+ return __nvme_fc_exit_request(data, op);
+}
+
+static void
+nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
+ int i;
+
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
+ continue;
+ __nvme_fc_exit_request(ctrl, aen_op);
+ nvme_fc_ctrl_put(ctrl);
+ }
+}
+
+void
+nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_queue *queue = op->queue;
+ struct nvme_completion *cqe = &op->rsp_iu.cqe;
+ u16 status;
+
+ /*
+ * WARNING:
+ * The current linux implementation of an nvme controller
+ * allocates a single tag set for all io queues and sizes
+ * the io queues to fully hold all possible tags. Thus, the
+ * implementation does not reference or care about the sqhd
+ * value as it never needs to use the sqhd/sqtail pointers
+ * for submission pacing.
+ *
+ * This affects the FC-NVME implementation in two ways:
+ * 1) As the value doesn't matter, we don't need to waste
+ * cycles extracting it from ERSPs and stamping it in the
+ * cases where the transport fabricates CQEs on successful
+ * completions.
+ * 2) The FC-NVME implementation requires that ERSP
+ * completions be delivered back to the nvme layer in order
+ * relative to the rsn, such that the sqhd value will always
+ * be "in order" for the nvme layer. As the nvme layer in
+ * linux doesn't care about sqhd, there's no need to return
+ * them in order.
+ *
+ * Additionally:
+ * As the core nvme layer in linux currently does not look at
+ * every field in the cqe - in cases where the FC transport must
+ * fabricate a CQE, the following fields will not be set as they
+ * are not referenced:
+ * cqe.sqid, cqe.sqhd, cqe.command_id
+ */
+
+ fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+
+ if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
+ status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ else
+ status = freq->status;
+
+ /*
+ * For the linux implementation, if we have an unsuccessful
+ * status, the blk-mq layer can typically be called with the
+ * non-zero status and the content of the cqe isn't important.
+ */
+ if (status)
+ goto done;
+
+ /*
+ * command completed successfully relative to the wire
+ * protocol. However, validate anything received and
+ * extract the status and result from the cqe (create it
+ * where necessary).
+ */
+
+ switch (freq->rcv_rsplen) {
+
+ case 0:
+ case NVME_FC_SIZEOF_ZEROS_RSP:
+ /*
+ * No response payload, or 12 bytes of payload (which
+ * should all be zeros), is treated as success; the
+ * transport then fabricates the CQE with no payload.
+ */
+ if (freq->transferred_length !=
+ be32_to_cpu(op->cmd_iu.data_len)) {
+ status = -EIO;
+ goto done;
+ }
+ op->nreq.result.u64 = 0;
+ break;
+
+ case sizeof(struct nvme_fc_ersp_iu):
+ /*
+ * The ERSP IU contains a full completion with CQE.
+ * Validate ERSP IU and look at cqe.
+ */
+ if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
+ (freq->rcv_rsplen / 4) ||
+ be32_to_cpu(op->rsp_iu.xfrd_len) !=
+ freq->transferred_length ||
+ op->rqno != le16_to_cpu(cqe->command_id))) {
+ status = -EIO;
+ goto done;
+ }
+ op->nreq.result = cqe->result;
+ status = le16_to_cpu(cqe->status) >> 1;
+ break;
+
+ default:
+ status = -EIO;
+ goto done;
+ }
+
+done:
+ if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
+ nvme_complete_async_event(&queue->ctrl->ctrl, status,
+ &op->nreq.result);
+ nvme_fc_ctrl_put(ctrl);
+ return;
+ }
+
+ blk_mq_complete_request(rq, status);
+}
+
+static int
+__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
+ struct request *rq, u32 rqno)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ int ret = 0;
+
+ memset(op, 0, sizeof(*op));
+ op->fcp_req.cmdaddr = &op->cmd_iu;
+ op->fcp_req.cmdlen = sizeof(op->cmd_iu);
+ op->fcp_req.rspaddr = &op->rsp_iu;
+ op->fcp_req.rsplen = sizeof(op->rsp_iu);
+ op->fcp_req.done = nvme_fc_fcpio_done;
+ op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
+ op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
+ op->ctrl = ctrl;
+ op->queue = queue;
+ op->rq = rq;
+ op->rqno = rqno;
+
+ cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->fc_id = NVME_CMD_FC_ID;
+ cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+
+ op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
+ &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - cmdiu dma mapping failed.\n");
+ ret = -EFAULT;
+ goto out_on_error;
+ }
+
+ op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
+ &op->rsp_iu, sizeof(op->rsp_iu),
+ DMA_FROM_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - rspiu dma mapping failed.\n");
+ ret = -EFAULT;
+ }
+
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+out_on_error:
+ return ret;
+}
+
+static int
+nvme_fc_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
+
+ return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_queue *queue = &ctrl->queues[0];
+
+ return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op;
+ struct nvme_fc_cmd_iu *cmdiu;
+ struct nvme_command *sqe;
+ int i, ret;
+
+ aen_op = ctrl->aen_ops;
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ cmdiu = &aen_op->cmd_iu;
+ sqe = &cmdiu->sqe;
+ ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
+ aen_op, (struct request *)NULL,
+ (AEN_CMDID_BASE + i));
+ if (ret)
+ return ret;
+
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->common.opcode = nvme_admin_async_event;
+ sqe->common.command_id = AEN_CMDID_BASE + i;
+ }
+ return 0;
+}
+
+
+static inline void
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
+ unsigned int qidx)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[qidx];
+
+ hctx->driver_data = queue;
+ queue->hctx = hctx;
+}
+
+static int
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+
+ __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
+
+ return 0;
+}
+
+static int
+nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+
+ __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
+
+ return 0;
+}
+
+static void
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+{
+ struct nvme_fc_queue *queue;
+
+ queue = &ctrl->queues[idx];
+ memset(queue, 0, sizeof(*queue));
+ queue->ctrl = ctrl;
+ queue->qnum = idx;
+ atomic_set(&queue->csn, 1);
+ queue->dev = ctrl->dev;
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ /*
+ * Considered whether we should allocate buffers for all SQEs
+ * and CQEs and dma map them - mapping their respective entries
+ * into the request structures (kernel vm addr and dma address)
+ * thus the driver could use the buffers/mappings directly.
+ * It only makes sense if the LLDD would use them for its
+ * messaging api. It's very unlikely most adapter api's would use
+ * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
+ * structures were used instead.
+ */
+}
+
+/*
+ * This routine terminates a queue at the transport level.
+ * The transport has already ensured that all outstanding ios on
+ * the queue have been terminated.
+ * The transport will send a Disconnect LS request to terminate
+ * the queue's connection. Termination of the admin queue will also
+ * terminate the association at the target.
+ */
+static void
+nvme_fc_free_queue(struct nvme_fc_queue *queue)
+{
+ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
+ return;
+
+ /*
+ * Current implementation never disconnects a single queue.
+ * It always terminates a whole association. So there is never
+ * a disconnect(queue) LS sent to the target.
+ */
+
+ queue->connection_id = 0;
+ clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+}
+
+static void
+__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx)
+{
+ if (ctrl->lport->ops->delete_queue)
+ ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
+ queue->lldd_handle);
+ queue->lldd_handle = NULL;
+}
+
+static void
+nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_fc_free_queue(&ctrl->queues[0]);
+}
+
+static void
+nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_fc_free_queue(&ctrl->queues[i]);
+}
+
+static int
+__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
+{
+ int ret = 0;
+
+ queue->lldd_handle = NULL;
+ if (ctrl->lport->ops->create_queue)
+ ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
+ qidx, qsize, &queue->lldd_handle);
+
+ return ret;
+}
+
+static void
+nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+ int i;
+
+ for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+ __nvme_fc_delete_hw_queue(ctrl, queue, i);
+}
+
+static int
+nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[1];
+ int i, j, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++, queue++) {
+ ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
+ if (ret) {
+ for (j = i-1; j >= 0; j--)
+ __nvme_fc_delete_hw_queue(ctrl,
+ &ctrl->queues[j], j);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
+ (qsize / 5));
+ if (ret)
+ break;
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void
+nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+}
+
+static void
+nvme_fc_ctrl_free(struct kref *ref)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(ref, struct nvme_fc_ctrl, ref);
+ unsigned long flags;
+
+ if (ctrl->state != FCCTRL_INIT) {
+ /* remove from rport list */
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ list_del(&ctrl->ctrl_list);
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+ }
+
+ put_device(ctrl->dev);
+ nvme_fc_rport_put(ctrl->rport);
+
+ kfree(ctrl->queues);
+ ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ nvmf_free_options(ctrl->ctrl.opts);
+ kfree(ctrl);
+}
+
+static void
+nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvme_fc_ctrl_free);
+}
+
+static int
+nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
+{
+ return kref_get_unless_zero(&ctrl->ref);
+}
+
+/*
+ * All accesses from nvme core layer done - can now free the
+ * controller. Called after last nvme_put_ctrl() call
+ */
+static void
+nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+ WARN_ON(nctrl != &ctrl->ctrl);
+
+ /*
+ * Tear down the association, which will generate link
+ * traffic to terminate connections
+ */
+
+ if (ctrl->state != FCCTRL_INIT) {
+ /* send a Disconnect(association) LS to fc-nvme target */
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_delete_hw_io_queues(ctrl);
+ nvme_fc_free_io_queues(ctrl);
+ }
+
+ nvme_fc_exit_aen_ops(ctrl);
+
+ nvme_fc_destroy_admin_queue(ctrl);
+ }
+
+ nvme_fc_ctrl_put(ctrl);
+}
+
+
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+ int state;
+
+ state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (state != FCPOP_STATE_ACTIVE) {
+ atomic_set(&op->state, state);
+ return -ECANCELED; /* fail */
+ }
+
+ ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ op->queue->lldd_handle,
+ &op->fcp_req);
+
+ return 0;
+}
+
+static enum blk_eh_timer_return
+nvme_fc_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ int ret;
+
+ if (reserved)
+ return BLK_EH_RESET_TIMER;
+
+ ret = __nvme_fc_abort_op(ctrl, op);
+ if (ret)
+ /* io wasn't active to abort; consider it done */
+ return BLK_EH_HANDLED;
+
+ /*
+ * TODO: force a controller reset.
+ * When that happens, queues will be torn down and outstanding
+ * ios will be terminated, and the above abort on a single io
+ * will no longer be needed.
+ */
+
+ return BLK_EH_HANDLED;
+}
+
+static int
+nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ u32 map_len = nvme_map_len(rq);
+ enum dma_data_direction dir;
+ int ret;
+
+ freq->sg_cnt = 0;
+
+ if (!map_len)
+ return 0;
+
+ freq->sg_table.sgl = freq->first_sgl;
+ ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
+ freq->sg_table.sgl);
+ if (ret)
+ return -ENOMEM;
+
+ op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ WARN_ON(op->nents > rq->nr_phys_segments);
+ dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
+ op->nents, dir);
+ if (unlikely(freq->sg_cnt <= 0)) {
+ sg_free_table_chained(&freq->sg_table, true);
+ freq->sg_cnt = 0;
+ return -EFAULT;
+ }
+
+ /*
+ * TODO: blk_integrity_rq(rq) for DIF
+ */
+ return 0;
+}
+
+static void
+nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+
+ if (!freq->sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
+ ((rq_data_dir(rq) == WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+ nvme_cleanup_cmd(rq);
+
+ sg_free_table_chained(&freq->sg_table, true);
+
+ freq->sg_cnt = 0;
+}
+
+/*
+ * In FC, the queue is a logical thing. At transport connect, the target
+ * creates its "queue" and returns a handle that the host must supply
+ * whenever it posts something to the corresponding SQ. When an SQE is
+ * sent on an SQ, FC effectively considers the SQE, or rather the
+ * command contained within the SQE, an io, and assigns an FC exchange
+ * to it. The SQE and the associated SQ handle are sent in the initial
+ * CMD IU sent on the exchange. All transfers relative to the io occur
+ * as part of the exchange. The CQE is the last thing for the io,
+ * which is transferred (explicitly or implicitly) with the RSP IU
+ * sent on the exchange. After the CQE is received, the FC exchange is
+ * terminated and the exchange may be reused for a different io.
+ *
+ * In the transport-to-LLDD API, the transport requests a new fcp io
+ * from the LLDD. The LLDD then allocates an FC exchange resource and
+ * transfers the command. The LLDD will then process all steps to
+ * complete the io. Upon completion, the transport done routine is
+ * called.
+ *
+ * So - while the operation is outstanding to the LLDD, there is a link
+ * level FC exchange resource that is also outstanding. This must be
+ * considered in all cleanup operations.
+ */
+static int
+nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ struct nvme_fc_fcp_op *op, u32 data_len,
+ enum nvmefc_fcp_datadir io_dir)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ u32 csn;
+ int ret;
+
+ if (!nvme_fc_ctrl_get(ctrl))
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ /* format the FC-NVME CMD IU and fcp_req */
+ cmdiu->connection_id = cpu_to_be64(queue->connection_id);
+ csn = atomic_inc_return(&queue->csn);
+ cmdiu->csn = cpu_to_be32(csn);
+ cmdiu->data_len = cpu_to_be32(data_len);
+ switch (io_dir) {
+ case NVMEFC_FCP_WRITE:
+ cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
+ break;
+ case NVMEFC_FCP_READ:
+ cmdiu->flags = FCNVME_CMD_FLAGS_READ;
+ break;
+ case NVMEFC_FCP_NODATA:
+ cmdiu->flags = 0;
+ break;
+ }
+ op->fcp_req.payload_length = data_len;
+ op->fcp_req.io_dir = io_dir;
+ op->fcp_req.transferred_length = 0;
+ op->fcp_req.rcv_rsplen = 0;
+ op->fcp_req.status = 0;
+ op->fcp_req.sqid = cpu_to_le16(queue->qnum);
+
+ /*
+ * validate per fabric rules, set fields mandated by fabric spec
+ * as well as those by FC-NVME spec.
+ */
+ WARN_ON_ONCE(sqe->common.metadata);
+ WARN_ON_ONCE(sqe->common.dptr.prp1);
+ WARN_ON_ONCE(sqe->common.dptr.prp2);
+ sqe->common.flags |= NVME_CMD_SGL_METABUF;
+
+ /*
+ * format SQE DPTR field per FC-NVME rules
+ * type=data block descr; subtype=offset;
+ * offset is currently 0.
+ */
+ sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
+ sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
+ sqe->rw.dptr.sgl.addr = 0;
+
+ /* odd that we set the command_id - should come from nvme-fabrics */
+ WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
+
+ if (op->rq) { /* skipped on aens */
+ ret = nvme_fc_map_data(ctrl, op->rq, op);
+ if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ nvme_cleanup_cmd(op->rq);
+ nvme_fc_ctrl_put(ctrl);
+ return (ret == -ENOMEM || ret == -EAGAIN) ?
+ BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+ }
+ }
+
+ fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_ACTIVE);
+
+ if (op->rq)
+ blk_mq_start_request(op->rq);
+
+ ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ queue->lldd_handle, &op->fcp_req);
+
+ if (ret) {
+ dev_err(ctrl->dev,
+ "Send nvme command failed - lldd returned %d.\n", ret);
+
+ if (op->rq) { /* normal request */
+ nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
+ /* else - aen. no cleanup needed */
+
+ nvme_fc_ctrl_put(ctrl);
+
+ if (ret != -EBUSY)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ if (op->rq) {
+ blk_mq_stop_hw_queues(op->rq->q);
+ blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+ }
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
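The transport/LLDD contract described in the comment above nvme_fc_start_fcp_op() can be summarized by the rough LLDD-side sketch below. This is illustrative only: the function name is hypothetical, the void * queue handle type and the done() completion hook are assumptions inferred from how the transport uses the ops here, and a real driver is far more involved.

/* Illustrative sketch of an LLDD ->fcp_io() handler (not a real driver). */
static int
example_lldd_fcp_io(struct nvme_fc_local_port *localport,
		    struct nvme_fc_remote_port *remoteport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *freq)
{
	/*
	 * A real LLDD would:
	 *  1. allocate an FC exchange for this command;
	 *  2. send the CMD IU (freq->cmddma, sizeof(struct nvme_fc_cmd_iu));
	 *  3. run the data phase per freq->io_dir and freq->payload_length;
	 *  4. on RSP IU receipt, fill in freq->transferred_length,
	 *     freq->rcv_rsplen and freq->status, then invoke the request's
	 *     done() callback so the transport completes the blk-mq request.
	 */
	return 0;	/* 0 means accepted; non-zero errors (e.g. -EBUSY) are handled above */
}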
+
+static int
+nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_fc_queue *queue = hctx->driver_data;
+ struct nvme_fc_ctrl *ctrl = queue->ctrl;
+ struct request *rq = bd->rq;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ enum nvmefc_fcp_datadir io_dir;
+ u32 data_len;
+ int ret;
+
+ ret = nvme_setup_cmd(ns, rq, sqe);
+ if (ret)
+ return ret;
+
+ data_len = nvme_map_len(rq);
+ if (data_len)
+ io_dir = ((rq_data_dir(rq) == WRITE) ?
+ NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
+ else
+ io_dir = NVMEFC_FCP_NODATA;
+
+ return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
+}
+
+static struct blk_mq_tags *
+nvme_fc_tagset(struct nvme_fc_queue *queue)
+{
+ if (queue->qnum == 0)
+ return queue->ctrl->admin_tag_set.tags[queue->qnum];
+
+ return queue->ctrl->tag_set.tags[queue->qnum - 1];
+}
+
+static int
+nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+ struct nvme_fc_queue *queue = hctx->driver_data;
+ struct nvme_fc_ctrl *ctrl = queue->ctrl;
+ struct request *req;
+ struct nvme_fc_fcp_op *op;
+
+ req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
+ if (!req) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on QNum %#x not found\n",
+ tag, queue->qnum);
+ return 0;
+ }
+
+ op = blk_mq_rq_to_pdu(req);
+
+ if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
+ (ctrl->lport->ops->poll_queue))
+ ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
+ queue->lldd_handle);
+
+ return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
+}
+
+static void
+nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
+ struct nvme_fc_fcp_op *aen_op;
+ int ret;
+
+ if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
+ return;
+
+ aen_op = &ctrl->aen_ops[aer_idx];
+
+ ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
+ NVMEFC_FCP_NODATA);
+ if (ret)
+ dev_err(ctrl->ctrl.device,
+ "failed async event work [%d]\n", aer_idx);
+}
+
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ int error = 0, state;
+
+ state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
+
+ nvme_cleanup_cmd(rq);
+
+ nvme_fc_unmap_data(ctrl, rq, op);
+
+ if (unlikely(rq->errors)) {
+ if (nvme_req_needs_retry(rq, rq->errors)) {
+ nvme_requeue_req(rq);
+ return;
+ }
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = rq->errors;
+ else
+ error = nvme_error_status(rq->errors);
+ }
+
+ nvme_fc_ctrl_put(ctrl);
+
+ blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_fc_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_request,
+ .exit_request = nvme_fc_exit_request,
+ .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_hctx,
+ .poll = nvme_fc_poll,
+ .timeout = nvme_fc_timeout,
+};
+
+static struct blk_mq_ops nvme_fc_admin_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_admin_request,
+ .exit_request = nvme_fc_exit_request,
+ .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_admin_hctx,
+ .timeout = nvme_fc_timeout,
+};
+
+static int
+nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+ u32 segs;
+ int error;
+
+ nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+
+ error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+ NVME_FC_AQ_BLKMQ_DEPTH,
+ (NVME_FC_AQ_BLKMQ_DEPTH / 4));
+ if (error)
+ return error;
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+ (SG_CHUNK_SIZE *
+ sizeof(struct scatterlist)) +
+ ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_free_queue;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+ NVME_FC_AQ_BLKMQ_DEPTH);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_delete_hw_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_delete_hw_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_delete_hw_queue;
+
+ segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
+ ctrl->lport->ops->max_sgl_segments);
+ ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_delete_hw_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_delete_hw_queue:
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_queue:
+ nvme_fc_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests; the iterator
+ * then invokes this routine on each of them, one by one.
+ *
+ * As FC allocates an FC exchange for each io, the transport must
+ * contact the LLDD to terminate the exchange, thus releasing the FC
+ * exchange. After terminating the exchange the LLDD will call the
+ * transport's normal io done path for the request, but it will have
+ * an aborted status. The done path will return the io request back
+ * to the block layer with an error status.
+ */
+static void
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+ int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ /* this performs an ABTS-LS on the FC exchange for the io */
+ status = __nvme_fc_abort_op(ctrl, op);
+ /*
+ * If __nvme_fc_abort_op failed, the io wasn't active to abort:
+ * consider it done and assume the completion path is already
+ * completing it in parallel.
+ */
+ if (status)
+ return;
+}
+
+
+/*
+ * This routine stops operation of the controller. Admin and IO queues
+ * are stopped, outstanding ios on them terminated, and the nvme ctrl
+ * is shutdown.
+ */
+static void
+nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates an FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us which ios are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ }
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ /*
+ * Now clean up the admin queue. Same thing as above: use
+ * blk_mq_tagset_busy_iter() and the transport routine to
+ * terminate the exchanges.
+ */
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+}
+
+/*
+ * Called to teardown an association.
+ * May be called with association fully in place or partially in place.
+ */
+static void
+__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ /* stop and terminate ios on admin and io queues */
+ nvme_fc_shutdown_ctrl(ctrl);
+
+ /*
+ * Tear down the controller.
+ * This will result in the last reference on the nvme ctrl
+ * expiring, calling the transport nvme_fc_free_nvme_ctrl() callback.
+ * From there, the transport will tear down its logical queues and
+ * association.
+ */
+ nvme_uninit_ctrl(&ctrl->ctrl);
+
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, delete_work);
+
+ __nvme_fc_remove_ctrl(ctrl);
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * Request from nvme core layer to delete the controller
+ */
+static int
+nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_rport *rport = ctrl->rport;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ ret = __nvme_fc_del_ctrl(ctrl);
+ spin_unlock_irqrestore(&rport->lock, flags);
+ if (ret)
+ return ret;
+
+ flush_work(&ctrl->delete_work);
+
+ return 0;
+}
+
+static int
+nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ return -EIO;
+}
+
+static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
+ .name = "fc",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_fc_reset_nvme_ctrl,
+ .free_ctrl = nvme_fc_free_nvme_ctrl,
+ .submit_async_event = nvme_fc_submit_async_event,
+ .delete_ctrl = nvme_fc_del_nvme_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+ .get_address = nvmf_get_address,
+};
+
+static int
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
+ }
+
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (!opts->nr_io_queues)
+ return 0;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ nvme_fc_init_io_queues(ctrl);
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_fc_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+ (SG_CHUNK_SIZE *
+ sizeof(struct scatterlist)) +
+ ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ return ret;
+
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_cleanup_blk_queue;
+
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_delete_hw_queues;
+
+ return 0;
+
+out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_blk_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_free_io_queues(ctrl);
+
+ /* force put free routine to ignore io queues */
+ ctrl->ctrl.tagset = NULL;
+
+ return ret;
+}
+
+
+static struct nvme_ctrl *
+__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ int ret, idx;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_free_ctrl;
+ }
+
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->ctrl_list);
+ INIT_LIST_HEAD(&ctrl->ls_req_list);
+ ctrl->lport = lport;
+ ctrl->rport = rport;
+ ctrl->dev = lport->dev;
+ ctrl->state = FCCTRL_INIT;
+ ctrl->cnum = idx;
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+ if (ret)
+ goto out_free_ida;
+
+ get_device(ctrl->dev);
+ kref_init(&ctrl->ref);
+
+ INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
+ spin_lock_init(&ctrl->lock);
+
+ /* io queue count */
+ ctrl->queue_count = min_t(unsigned int,
+ opts->nr_io_queues,
+ lport->ops->max_hw_queues);
+ opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
+ ctrl->queue_count++; /* +1 for admin queue */
+
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_fc_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_uninit_ctrl;
+
+ /* sanity checks */
+
+ /* FC-NVME supports 64-byte SQE only */
+ if (ctrl->ctrl.ioccsz != 4) {
+ dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
+ ctrl->ctrl.ioccsz);
+ ret = -EINVAL;
+ goto out_remove_admin_queue;
+ }
+ /* FC-NVME supports 16-byte CQE only */
+ if (ctrl->ctrl.iorcsz != 1) {
+ dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
+ ctrl->ctrl.iorcsz);
+ ret = -EINVAL;
+ goto out_remove_admin_queue;
+ }
+ /* FC-NVME does not have other data in the capsule */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
+ ctrl->ctrl.icdoff);
+ ret = -EINVAL;
+ goto out_remove_admin_queue;
+ }
+
+ /* FC-NVME supports normal SGL Data Block Descriptors */
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, reducing "
+ "to maxcmd\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ ret = nvme_fc_init_aen_ops(ctrl);
+ if (ret)
+ goto out_exit_aen_ops;
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_fc_create_io_queues(ctrl);
+ if (ret)
+ goto out_exit_aen_ops;
+ }
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->state = FCCTRL_ACTIVE;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
+ ctrl->cnum, ctrl->ctrl.opts->subsysnqn, ctrl);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_exit_aen_ops:
+ nvme_fc_exit_aen_ops(ctrl);
+out_remove_admin_queue:
+ /* send a Disconnect(association) LS to fc-nvme target */
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_fc_destroy_admin_queue(ctrl);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ /* exit via here will follow ctrl ref count callbacks to free */
+ return ERR_PTR(ret);
+
+out_free_ida:
+ ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+ kfree(ctrl);
+out_fail:
+ nvme_fc_rport_put(rport);
+ /* exit via here doesn't follow ctrl ref counting */
+ return ERR_PTR(ret);
+}
+
+enum {
+ FCT_TRADDR_ERR = 0,
+ FCT_TRADDR_WWNN = 1 << 0,
+ FCT_TRADDR_WWPN = 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+ { FCT_TRADDR_WWNN, "nn-%s" },
+ { FCT_TRADDR_WWPN, "pn-%s" },
+ { FCT_TRADDR_ERR, NULL }
+};
+
+static int
+nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ":\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, traddr_opt_tokens, args);
+ switch (token) {
+ case FCT_TRADDR_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->nn = token64;
+ break;
+ case FCT_TRADDR_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->pn = token64;
+ break;
+ default:
+ pr_warn("unknown traddr token or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ kfree(options);
+ return ret;
+}
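For reference, nvme_fc_parse_address() splits its input on ':' and accepts "nn-" and "pn-" prefixed 64-bit values (match_u64() auto-detects the base, so 0x-prefixed hex works). A well-formed address therefore looks like the sketch below; the WWN values are made up for illustration.

/* Illustrative traddr string accepted by nvme_fc_parse_address(). */
static const char example_fc_traddr[] =
	"nn-0x20000090fa931002:pn-0x10000090fa931002";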
+
+static struct nvme_ctrl *
+nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ struct nvmet_fc_traddr laddr = { 0L, 0L };
+ struct nvmet_fc_traddr raddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ if (ret || !raddr.nn || !raddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ if (ret || !laddr.nn || !laddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ /* find the host and remote ports to connect together */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ if (lport->localport.node_name != laddr.nn ||
+ lport->localport.port_name != laddr.pn)
+ continue;
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != raddr.nn ||
+ rport->remoteport.port_name != raddr.pn)
+ continue;
+
+ /* if we fail to get a reference, fall through; an error is returned */
+ if (!nvme_fc_rport_get(rport))
+ break;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return __nvme_fc_create_ctrl(dev, opts, lport, rport);
+ }
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return ERR_PTR(-ENOENT);
+}
+
+
+static struct nvmf_transport_ops nvme_fc_transport = {
+ .name = "fc",
+ .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
+ .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
+ .create_ctrl = nvme_fc_create_ctrl,
+};
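For context, the fabrics core dispatches to this transport when the option string written to /dev/nvme-fabrics contains transport=fc; the required traddr and host_traddr options carry the remote and local port addresses in the nn-/pn- format parsed above. An illustrative connect string (made-up WWNs and NQN) would be:

transport=fc,traddr=nn-0x20000090fa931002:pn-0x10000090fa931002,host_traddr=nn-0x20000090fa931001:pn-0x10000090fa931001,nqn=nqn.2016-06.io.example:subsys1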
+
+static int __init nvme_fc_init_module(void)
+{
+ nvme_fc_wq = create_workqueue("nvme_fc_wq");
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
+ nvmf_register_transport(&nvme_fc_transport);
+ return 0;
+}
+
+static void __exit nvme_fc_exit_module(void)
+{
+ /* sanity check - all lports should be removed */
+ if (!list_empty(&nvme_fc_lport_list))
+ pr_warn("%s: localport list not empty\n", __func__);
+
+ nvmf_unregister_transport(&nvme_fc_transport);
+
+ destroy_workqueue(nvme_fc_wq);
+
+ ida_destroy(&nvme_fc_local_port_cnt);
+ ida_destroy(&nvme_fc_ctrl_cnt);
+}
+
+module_init(nvme_fc_init_module);
+module_exit(nvme_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5daf2f4be0cd..588d4a34c083 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
};
};
-struct nvme_nvm_completion {
- __le64 result; /* Used by LightNVM to return ppa completions */
- __le16 sq_head; /* how much of this queue may be reclaimed */
- __le16 sq_id; /* submission queue that generated this entry */
- __u16 command_id; /* of the command which completed */
- __le16 status; /* did the command fail, and if so, why? */
-};
-
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
__le16 num_pairs;
@@ -360,6 +352,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
while (nlb) {
u32 cmd_nlb = min(nlb_pr_rq, nlb);
+ u64 elba = slba + cmd_nlb;
c.l2p.slba = cpu_to_le64(cmd_slba);
c.l2p.nlb = cpu_to_le32(cmd_nlb);
@@ -373,6 +366,14 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
goto out;
}
+ if (unlikely(elba > nvmdev->total_secs)) {
+ pr_err("nvm: L2P data from device is out of bounds!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Transform physical address to target address space */
+ nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);
+
if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
ret = -EINTR;
goto out;
@@ -391,11 +392,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
struct request_queue *q = nvmdev->q;
+ struct nvm_geo *geo = &nvmdev->geo;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int nr_blks = geo->blks_per_lun * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -436,7 +438,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -481,14 +483,11 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
static void nvme_nvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
- struct nvme_nvm_completion *cqe = rq->special;
-
- if (cqe)
- rqd->ppa_status = le64_to_cpu(cqe->result);
+ rqd->ppa_status = nvme_req(rq)->result.u64;
nvm_end_io(rqd, error);
- kfree(rq->cmd);
+ kfree(nvme_req(rq)->cmd);
blk_mq_free_request(rq);
}
@@ -500,20 +499,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
struct nvme_nvm_command *cmd;
- rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
- if (IS_ERR(rq))
+ cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+ if (!cmd)
return -ENOMEM;
- cmd = kzalloc(sizeof(struct nvme_nvm_command) +
- sizeof(struct nvme_nvm_completion), GFP_KERNEL);
- if (!cmd) {
- blk_mq_free_request(rq);
+ rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+ if (IS_ERR(rq)) {
+ kfree(cmd);
return -ENOMEM;
}
+ rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->ioprio = bio_prio(bio);
-
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -522,10 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
- rq->cmd = (unsigned char *)cmd;
- rq->cmd_len = sizeof(struct nvme_nvm_command);
- rq->special = cmd + 1;
-
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -543,6 +536,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
c.erase.nsid = cpu_to_le32(ns->ns_id);
c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
+ c.erase.control = cpu_to_le16(rqd->flags);
return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
@@ -592,12 +586,10 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.max_phys_sect = 64,
};
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
- const struct attribute_group *attrs)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
struct request_queue *q = ns->queue;
struct nvm_dev *dev;
- int ret;
dev = nvm_alloc_dev(node);
if (!dev)
@@ -606,18 +598,10 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
dev->ops = &nvme_nvm_dev_ops;
- dev->parent_dev = ns->ctrl->device;
dev->private_data = ns;
ns->ndev = dev;
- ret = nvm_register(dev);
-
- ns->lba_shift = ilog2(dev->sec_size);
-
- if (sysfs_create_group(&dev->dev.kobj, attrs))
- pr_warn("%s: failed to create sysfs group for identification\n",
- disk_name);
- return ret;
+ return nvm_register(dev);
}
void nvme_nvm_unregister(struct nvme_ns *ns)
@@ -625,6 +609,167 @@ void nvme_nvm_unregister(struct nvme_ns *ns)
nvm_unregister(ns->ndev);
}
+static ssize_t nvm_dev_attr_show(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvm_dev *ndev = ns->ndev;
+ struct nvm_id *id;
+ struct nvm_id_group *grp;
+ struct attribute *attr;
+
+ if (!ndev)
+ return 0;
+
+ id = &ndev->identity;
+ grp = &id->groups[0];
+ attr = &dattr->attr;
+
+ if (strcmp(attr->name, "version") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+ } else if (strcmp(attr->name, "vendor_opcode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+ } else if (strcmp(attr->name, "capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+ } else if (strcmp(attr->name, "device_mode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+ } else if (strcmp(attr->name, "media_manager") == 0) {
+ if (!ndev->mt)
+ return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+ return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+ } else if (strcmp(attr->name, "ppa_format") == 0) {
+ return scnprintf(page, PAGE_SIZE,
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ id->ppaf.ch_offset, id->ppaf.ch_len,
+ id->ppaf.lun_offset, id->ppaf.lun_len,
+ id->ppaf.pln_offset, id->ppaf.pln_len,
+ id->ppaf.blk_offset, id->ppaf.blk_len,
+ id->ppaf.pg_offset, id->ppaf.pg_len,
+ id->ppaf.sect_offset, id->ppaf.sect_len);
+ } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+ } else if (strcmp(attr->name, "flash_media_type") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+ } else if (strcmp(attr->name, "num_channels") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+ } else if (strcmp(attr->name, "num_luns") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+ } else if (strcmp(attr->name, "num_planes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+ } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ } else if (strcmp(attr->name, "num_pages") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+ } else if (strcmp(attr->name, "page_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+ } else if (strcmp(attr->name, "hw_sector_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+ } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+ } else if (strcmp(attr->name, "read_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+ } else if (strcmp(attr->name, "read_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+ } else if (strcmp(attr->name, "prog_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+ } else if (strcmp(attr->name, "prog_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+ } else if (strcmp(attr->name, "erase_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+ } else if (strcmp(attr->name, "erase_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+ } else if (strcmp(attr->name, "multiplane_modes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+ } else if (strcmp(attr->name, "media_capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+ } else if (strcmp(attr->name, "max_phys_secs") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ ndev->ops->max_phys_sect);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ attr->name);
+ }
+}
+
+#define NVM_DEV_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+static NVM_DEV_ATTR_RO(version);
+static NVM_DEV_ATTR_RO(vendor_opcode);
+static NVM_DEV_ATTR_RO(capabilities);
+static NVM_DEV_ATTR_RO(device_mode);
+static NVM_DEV_ATTR_RO(ppa_format);
+static NVM_DEV_ATTR_RO(media_manager);
+
+static NVM_DEV_ATTR_RO(media_type);
+static NVM_DEV_ATTR_RO(flash_media_type);
+static NVM_DEV_ATTR_RO(num_channels);
+static NVM_DEV_ATTR_RO(num_luns);
+static NVM_DEV_ATTR_RO(num_planes);
+static NVM_DEV_ATTR_RO(num_blocks);
+static NVM_DEV_ATTR_RO(num_pages);
+static NVM_DEV_ATTR_RO(page_size);
+static NVM_DEV_ATTR_RO(hw_sector_size);
+static NVM_DEV_ATTR_RO(oob_sector_size);
+static NVM_DEV_ATTR_RO(read_typ);
+static NVM_DEV_ATTR_RO(read_max);
+static NVM_DEV_ATTR_RO(prog_typ);
+static NVM_DEV_ATTR_RO(prog_max);
+static NVM_DEV_ATTR_RO(erase_typ);
+static NVM_DEV_ATTR_RO(erase_max);
+static NVM_DEV_ATTR_RO(multiplane_modes);
+static NVM_DEV_ATTR_RO(media_capabilities);
+static NVM_DEV_ATTR_RO(max_phys_secs);
+
+static struct attribute *nvm_dev_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_vendor_opcode.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_device_mode.attr,
+ &dev_attr_media_manager.attr,
+
+ &dev_attr_ppa_format.attr,
+ &dev_attr_media_type.attr,
+ &dev_attr_flash_media_type.attr,
+ &dev_attr_num_channels.attr,
+ &dev_attr_num_luns.attr,
+ &dev_attr_num_planes.attr,
+ &dev_attr_num_blocks.attr,
+ &dev_attr_num_pages.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_hw_sector_size.attr,
+ &dev_attr_oob_sector_size.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ &dev_attr_prog_typ.attr,
+ &dev_attr_prog_max.attr,
+ &dev_attr_erase_typ.attr,
+ &dev_attr_erase_max.attr,
+ &dev_attr_multiplane_modes.attr,
+ &dev_attr_media_capabilities.attr,
+ &dev_attr_max_phys_secs.attr,
+ NULL,
+};
+
+static const struct attribute_group nvm_dev_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+};
+
+int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+ return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group);
+}
+
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
+{
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group);
+}
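Because the attribute group above is named "lightnvm" and is attached to the namespace's gendisk device, these attributes surface under the block device in sysfs, e.g. /sys/block/nvme0n1/lightnvm/num_channels (the disk name here is illustrative).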
+
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d47f5a5d18c7..bd5321441d12 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -79,6 +79,20 @@ enum nvme_quirks {
NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
};
+/*
+ * Common request structure for NVMe passthrough. All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+ struct nvme_command *cmd;
+ union nvme_result result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+ return blk_mq_rq_to_pdu(req);
+}
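As a usage sketch, a transport embeds this structure at the start of its per-request private data so that nvme_req() can recover it from the blk-mq PDU; the struct name below is hypothetical:

struct example_transport_request {
	struct nvme_request	req;	/* must be the first member */
	/* transport-specific state follows */
	int			flags;
};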
+
/* The below value is the specific amount of delay needed before checking
* readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -222,8 +236,10 @@ static inline unsigned nvme_map_len(struct request *rq)
static inline void nvme_cleanup_cmd(struct request *req)
{
- if (req_op(req) == REQ_OP_DISCARD)
- kfree(req->completion_data);
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ kfree(page_address(req->special_vec.bv_page) +
+ req->special_vec.bv_offset);
+ }
}
static inline int nvme_error_status(u16 status)
@@ -261,8 +277,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
#define NVME_NR_AERS 1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
- struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
@@ -278,7 +294,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
@@ -307,36 +323,33 @@ int nvme_sg_get_version_num(int __user *ip);
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
- const struct attribute_group *attrs);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
-
-static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
-{
- if (dev->type->devnode)
- return dev_to_disk(dev)->private_data;
-
- return (container_of(dev, struct nvm_dev, dev))->private_data;
-}
+int nvme_nvm_register_sysfs(struct nvme_ns *ns);
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
- int node,
- const struct attribute_group *attrs)
+ int node)
{
return 0;
}
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-
+static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+ return 0;
+}
+static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
return 0;
}
+#endif /* CONFIG_NVM */
+
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
}
-#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
void nvme_core_exit(void);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5e52034ab010..3ba38f4d774d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_queue {
* allocated to store the PRP list.
*/
struct nvme_iod {
+ struct nvme_request req;
struct nvme_queue *nvmeq;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
@@ -302,14 +303,14 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
static __le64 **iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (__le64 **)(iod->sg + req->nr_phys_segments);
+ return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
static int nvme_init_iod(struct request *rq, unsigned size,
struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
- int nseg = rq->nr_phys_segments;
+ int nseg = blk_rq_nr_phys_segments(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -324,11 +325,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
iod->nents = 0;
iod->length = size;
- if (!(rq->cmd_flags & REQ_DONTPREP)) {
+ if (!(rq->rq_flags & RQF_DONTPREP)) {
rq->retries = 0;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
}
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -339,8 +340,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
__le64 **list = iod_list(req);
dma_addr_t prp_dma = iod->first_dma;
- nvme_cleanup_cmd(req);
-
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
for (i = 0; i < iod->npages; i++) {
@@ -510,7 +509,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = BLK_MQ_RQ_QUEUE_ERROR;
- sg_init_table(iod->sg, req->nr_phys_segments);
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(q, req, iod->sg);
if (!iod->nents)
goto out;
@@ -566,6 +565,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
}
}
+ nvme_cleanup_cmd(req);
nvme_free_iod(dev, req);
}
@@ -596,22 +596,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
- if (ret)
+ ret = nvme_setup_cmd(ns, req, &cmnd);
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- ret = nvme_setup_cmd(ns, req, &cmnd);
- if (ret)
- goto out;
+ map_len = nvme_map_len(req);
+ ret = nvme_init_iod(req, map_len, dev);
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
+ goto out_free_cmd;
- if (req->nr_phys_segments)
+ if (blk_rq_nr_phys_segments(req))
ret = nvme_map_data(dev, req, map_len, &cmnd);
- if (ret)
- goto out;
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
+ goto out_cleanup_iod;
- cmnd.common.command_id = req->tag;
blk_mq_start_request(req);
spin_lock_irq(&nvmeq->q_lock);
@@ -621,14 +620,16 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
else
ret = BLK_MQ_RQ_QUEUE_ERROR;
spin_unlock_irq(&nvmeq->q_lock);
- goto out;
+ goto out_cleanup_iod;
}
__nvme_submit_cmd(nvmeq, &cmnd);
nvme_process_cq(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
return BLK_MQ_RQ_QUEUE_OK;
-out:
+out_cleanup_iod:
nvme_free_iod(dev, req);
+out_free_cmd:
+ nvme_cleanup_cmd(req);
return ret;
}
@@ -703,13 +704,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*/
if (unlikely(nvmeq->qid == 0 &&
cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+ nvme_complete_async_event(&nvmeq->dev->ctrl,
+ cqe.status, &cqe.result);
continue;
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
- memcpy(req->special, &cqe, sizeof(cqe));
+ nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add36d91..f42ab70ffa38 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -28,7 +28,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvme.h"
@@ -66,6 +65,7 @@ struct nvme_rdma_qe {
struct nvme_rdma_queue;
struct nvme_rdma_request {
+ struct nvme_request req;
struct ib_mr *mr;
struct nvme_rdma_qe sqe;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -241,7 +241,9 @@ out_free_ring:
static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
- pr_debug("QP event %d\n", event->event);
+ pr_debug("QP event %s (%d)\n",
+ ib_event_msg(event->event), event->event);
+
}
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -963,8 +965,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
- int nents, count;
- int ret;
+ int count, ret;
req->num_sge = 1;
req->inline_data = false;
@@ -976,16 +977,14 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
return nvme_rdma_set_sg_null(c);
req->sg_table.sgl = req->first_sgl;
- ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
- req->sg_table.sgl);
+ ret = sg_alloc_table_chained(&req->sg_table,
+ blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
if (ret)
return -ENOMEM;
- nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
- BUG_ON(nents > rq->nr_phys_segments);
- req->nents = nents;
+ req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+ count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (unlikely(count <= 0)) {
sg_free_table_chained(&req->sg_table, true);
@@ -1130,13 +1129,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
struct nvme_completion *cqe, struct ib_wc *wc, int tag)
{
- u16 status = le16_to_cpu(cqe->status);
struct request *rq;
struct nvme_rdma_request *req;
int ret = 0;
- status >>= 1;
-
rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
@@ -1147,9 +1143,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
}
req = blk_mq_rq_to_pdu(rq);
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
- memcpy(rq->special, cqe, sizeof(*cqe));
-
if (rq->tag == tag)
ret = 1;
@@ -1157,8 +1150,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
wc->ex.invalidate_rkey == req->mr->rkey)
req->mr->need_inval = false;
- blk_mq_complete_request(rq, status);
-
+ req->req.result = cqe->result;
+ blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
return ret;
}
@@ -1186,7 +1179,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
*/
if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
- nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
else
ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
@@ -1433,10 +1427,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ret = nvme_setup_cmd(ns, rq, c);
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- c->common.command_id = rq->tag;
blk_mq_start_request(rq);
map_len = nvme_map_len(rq);
@@ -1944,6 +1937,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ /* warn if sqsize is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.sqsize + 1);
+ opts->queue_size = ctrl->ctrl.sqsize + 1;
+ }
+
if (opts->nr_io_queues) {
ret = nvme_rdma_create_io_queues(ctrl);
if (ret)
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 3eaa4d27801e..b71e95044b43 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
u16 idx, u16 bd_len, u8 llbaa)
{
- u16 bd_num;
-
- bd_num = bd_len / ((llbaa == 0) ?
- SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
/* Store block descriptor info if a FORMAT UNIT comes later */
/* TODO Saving 1st BD info; what to do if multiple BD received? */
if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc;
struct nvme_id_ns *id_ns;
u8 i;
- u8 flbas, nlbaf;
+ u8 nlbaf;
u8 selected_lbaf = 0xFF;
u32 cdw10 = 0;
struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
- flbas = (id_ns->flbas) & 0x0F;
nlbaf = id_ns->nlbaf;
for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
- u8 immed, pcmod, no_flush, start;
+ u8 immed, no_flush;
immed = cmd[1] & 0x01;
- pcmod = cmd[3] & 0x0f;
no_flush = cmd[4] & 0x04;
- start = cmd[4] & 0x01;
if (immed != 0) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3a5b9d0576cb..03e4ab65fe77 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -34,3 +34,27 @@ config NVME_TARGET_RDMA
devices over RDMA.
If unsure, say N.
+
+config NVME_TARGET_FC
+ tristate "NVMe over Fabrics FC target driver"
+ depends on NVME_TARGET
+ depends on HAS_DMA
+ help
+ This enables the NVMe FC target support, which allows exporting NVMe
+ devices over FC.
+
+ If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+ tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+ depends on NVME_TARGET
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ depends on NVME_FC
+ depends on NVME_TARGET_FC
+ help
+ This enables the NVMe FC loopback test support, which can be useful
+ to test NVMe-FC transport interfaces.
+
+ If unsure, say N.
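Selecting both new options as modules yields a .config fragment like the following (illustrative):

CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_FCLOOP=m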
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index b7a06232c9da..fecc14f535b2 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -2,8 +2,12 @@
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
discovery.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
+nvmet-fc-y += fc.o
+nvme-fcloop-y += fcloop.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fe4c48a21e4..ec1ad2aa0a4c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -237,7 +237,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
- id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
+ id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
+ NVME_CTRL_ONCS_WRITE_ZEROES);
/* XXX: don't report vwc if the underlying device is write through */
id->vwc = NVME_CTRL_VWC_PRESENT;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index af5e2dc4a3d5..d0f60c36d576 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
return sprintf(page, "ipv6\n");
case NVMF_ADDR_FAMILY_IB:
return sprintf(page, "ib\n");
+ case NVMF_ADDR_FAMILY_FC:
+ return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
} else if (sysfs_streq(page, "ib")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+ } else if (sysfs_streq(page, "fc")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
} else {
pr_err("Invalid value '%s' for adrfam\n", page);
return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
return sprintf(page, "rdma\n");
case NVMF_TRTYPE_LOOP:
return sprintf(page, "loop\n");
+ case NVMF_TRTYPE_FC:
+ return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
+static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_FC;
+ memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count)
{
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
nvmet_port_init_tsas_rdma(port);
} else if (sysfs_streq(page, "loop")) {
nvmet_port_init_tsas_loop(port);
+ } else if (sysfs_streq(page, "fc")) {
+ nvmet_port_init_tsas_fc(port);
} else {
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
mutex_lock(&subsys->lock);
ret = -EBUSY;
- if (nvmet_ns_enabled(ns))
+ if (ns->enabled)
goto out_unlock;
kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
int ret = 0;
mutex_lock(&subsys->lock);
- if (nvmet_ns_enabled(ns)) {
+ if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
- return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}
static ssize_t nvmet_ns_enable_store(struct config_item *item,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a21437a33adb..b1d66ed655c9 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret = 0;
mutex_lock(&subsys->lock);
- if (!list_empty(&ns->dev_link))
+ if (ns->enabled)
goto out_unlock;
ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+ ns->enabled = true;
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
- if (list_empty(&ns->dev_link)) {
- mutex_unlock(&subsys->lock);
- return;
- }
- list_del_init(&ns->dev_link);
+ if (!ns->enabled)
+ goto out_unlock;
+
+ ns->enabled = false;
+ list_del_rcu(&ns->dev_link);
mutex_unlock(&subsys->lock);
/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
if (ns->bdev)
blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+out_unlock:
mutex_unlock(&subsys->lock);
}
@@ -617,7 +619,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
@@ -638,7 +640,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
out:
@@ -700,7 +702,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
goto out;
}
@@ -709,7 +711,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!nvmet_host_allowed(req, subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
goto out_put_subsystem;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 9a97ae67e656..f4088198cd0d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
}
}
- req->rsp->result64 = cpu_to_le64(val);
+ req->rsp->result.u64 = cpu_to_le64(val);
nvmet_req_complete(req, status);
}
@@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
d = kmap(sg_page(req->sg)) + req->sg->offset;
/* zero out initial completion result, assign values as needed */
- req->rsp->result = 0;
+ req->rsp->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
@@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_info("creating controller %d for NQN %s.\n",
ctrl->cntlid, ctrl->hostnqn);
- req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
kunmap(sg_page(req->sg));
@@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
d = kmap(sg_page(req->sg)) + req->sg->offset;
/* zero out initial completion result, assign values as needed */
- req->rsp->result = 0;
+ req->rsp->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
status = nvmet_install_queue(ctrl, req);
if (status) {
/* pass back cntlid that had the issue of installing queue */
- req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
goto out_ctrl_put;
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
new file mode 100644
index 000000000000..173e842f19c9
--- /dev/null
+++ b/drivers/nvme/target/fc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+#define NVMET_LS_CTX_COUNT 4
+
+/* for this implementation, assume small single frame rqst/rsp */
+#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
+
+struct nvmet_fc_tgtport;
+struct nvmet_fc_tgt_assoc;
+
+struct nvmet_fc_ls_iod {
+ struct nvmefc_tgt_ls_req *lsreq;
+ struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
+
+ struct list_head ls_list; /* tgtport->ls_list */
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_assoc *assoc;
+
+ u8 *rqstbuf;
+ u8 *rspbuf;
+ u16 rqstdatalen;
+ dma_addr_t rspdma;
+
+ struct scatterlist sg[2];
+
+ struct work_struct work;
+} __aligned(sizeof(unsigned long long));
+
+#define NVMET_FC_MAX_KB_PER_XFR 256
+
+enum nvmet_fcp_datadir {
+ NVMET_FCP_NODATA,
+ NVMET_FCP_WRITE,
+ NVMET_FCP_READ,
+ NVMET_FCP_ABORTED,
+};
+
+struct nvmet_fc_fcp_iod {
+ struct nvmefc_tgt_fcp_req *fcpreq;
+
+ struct nvme_fc_cmd_iu cmdiubuf;
+ struct nvme_fc_ersp_iu rspiubuf;
+ dma_addr_t rspdma;
+ struct scatterlist *data_sg;
+ struct scatterlist *next_sg;
+ int data_sg_cnt;
+ u32 next_sg_offset;
+ u32 total_length;
+ u32 offset;
+ enum nvmet_fcp_datadir io_dir;
+ bool active;
+ bool abort;
+ spinlock_t flock;
+
+ struct nvmet_req req;
+ struct work_struct work;
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+
+ struct list_head fcp_list; /* tgtport->fcp_list */
+};
+
+struct nvmet_fc_tgtport {
+
+ struct nvmet_fc_target_port fc_target_port;
+
+ struct list_head tgt_list; /* nvmet_fc_target_list */
+ struct device *dev; /* dev for dma mapping */
+ struct nvmet_fc_target_template *ops;
+
+ struct nvmet_fc_ls_iod *iod;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct list_head ls_busylist;
+ struct list_head assoc_list;
+ struct ida assoc_cnt;
+ struct nvmet_port *port;
+ struct kref ref;
+};
+
+struct nvmet_fc_tgt_queue {
+ bool ninetypercent;
+ u16 qid;
+ u16 sqsize;
+ u16 ersp_ratio;
+ u16 sqhd;
+ int cpu;
+ atomic_t connected;
+ atomic_t sqtail;
+ atomic_t zrspcnt;
+ atomic_t rsn;
+ spinlock_t qlock;
+ struct nvmet_port *port;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
+ struct list_head fod_list;
+ struct workqueue_struct *work_q;
+ struct kref ref;
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_tgt_assoc {
+ u64 association_id;
+ u32 a_id;
+ struct nvmet_fc_tgtport *tgtport;
+ struct list_head a_list;
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
+ struct kref ref;
+};
+
+
+static inline int
+nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+{
+ return (iodptr - iodptr->tgtport->iod);
+}
+
+static inline int
+nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+{
+ return (fodptr - fodptr->queue->fod);
+}
+
+
+/*
+ * Association and Connection IDs:
+ *
+ * Association ID will have random number in upper 6 bytes and zero
+ * in lower 2 bytes
+ *
+ * Connection IDs will be Association ID with QID or'd in lower 2 bytes
+ *
+ * note: Association ID = Connection ID for queue 0
+ */
+#define BYTES_FOR_QID sizeof(u16)
+#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
+#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
+
+static inline u64
+nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
+{
+ return (assoc->association_id | qid);
+}
+
+static inline u64
+nvmet_fc_getassociationid(u64 connectionid)
+{
+ return connectionid & ~NVMET_FC_QUEUEID_MASK;
+}
+
+static inline u16
+nvmet_fc_getqueueid(u64 connectionid)
+{
+ return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
+}
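
To make the ID layout above concrete, here is a minimal standalone C sketch (userspace types, not part of the patch) that mirrors the packing done by nvmet_fc_makeconnid() and its two unpack helpers; the association-ID value is an arbitrary placeholder:

#include <assert.h>
#include <stdint.h>

#define QID_SHIFT 16                              /* 2 bytes reserved for the qid */
#define QID_MASK  ((uint64_t)((1 << QID_SHIFT) - 1))

int main(void)
{
	/* random upper 6 bytes, zero lower 2 bytes (placeholder value) */
	uint64_t assoc_id = 0x123456789abcULL << QID_SHIFT;
	uint16_t qid = 3;

	uint64_t conn_id = assoc_id | qid;               /* nvmet_fc_makeconnid()        */
	assert((conn_id & ~QID_MASK) == assoc_id);       /* nvmet_fc_getassociationid()  */
	assert((uint16_t)(conn_id & QID_MASK) == qid);   /* nvmet_fc_getqueueid()        */
	assert((assoc_id | 0) == assoc_id);              /* queue 0: conn id == assoc id */
	return 0;
}
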
+
+static inline struct nvmet_fc_tgtport *
+targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
+{
+ return container_of(targetport, struct nvmet_fc_tgtport,
+ fc_target_port);
+}
+
+static inline struct nvmet_fc_fcp_iod *
+nvmet_req_to_fod(struct nvmet_req *nvme_req)
+{
+ return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
+}
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
+
+static LIST_HEAD(nvmet_fc_target_list);
+static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+
+
+static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
+static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
+static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
+static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * For more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+
+static int
+nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ int i;
+
+ iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
+ GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ tgtport->iod = iod;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
+ iod->tgtport = tgtport;
+ list_add_tail(&iod->ls_list, &tgtport->ls_list);
+
+ iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!iod->rqstbuf)
+ goto out_fail;
+
+ iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+
+ iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+ NVME_FC_MAX_LS_BUFFER_SIZE,
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ for (iod--, i--; i >= 0; iod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ }
+
+ kfree(iod);
+
+ return -EFAULT;
+}
+
+static void
+nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod = tgtport->iod;
+ int i;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ fc_dma_unmap_single(tgtport->dev,
+ iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ }
+ kfree(tgtport->iod);
+}
+
+static struct nvmet_fc_ls_iod *
+nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ iod = list_first_entry_or_null(&tgtport->ls_list,
+ struct nvmet_fc_ls_iod, ls_list);
+ if (iod)
+ list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return iod;
+}
+
+
+static void
+nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_move(&iod->ls_list, &tgtport->ls_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+static void
+nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
+ fod->tgtport = tgtport;
+ fod->queue = queue;
+ fod->active = false;
+ list_add_tail(&fod->fcp_list, &queue->fod_list);
+ spin_lock_init(&fod->flock);
+
+ fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+ list_del(&fod->fcp_list);
+ for (fod--, i--; i >= 0; fod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf),
+ DMA_TO_DEVICE);
+ fod->rspdma = 0L;
+ list_del(&fod->fcp_list);
+ }
+
+ return;
+ }
+ }
+}
+
+static void
+nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->rspdma)
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ }
+}
+
+static struct nvmet_fc_fcp_iod *
+nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ fod = list_first_entry_or_null(&queue->fod_list,
+ struct nvmet_fc_fcp_iod, fcp_list);
+ if (fod) {
+ list_del(&fod->fcp_list);
+ fod->active = true;
+ fod->abort = false;
+ /*
+ * no queue reference is taken, as it was taken by the
+ * queue lookup just prior to the allocation. The iod
+ * will "inherit" that reference.
+ */
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+ return fod;
+}
+
+
+static void
+nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ fod->active = false;
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /*
+ * release the reference taken at queue lookup and fod allocation
+ */
+ nvmet_fc_tgt_q_put(queue);
+}
+
+static int
+nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
+{
+ int cpu, idx, cnt;
+
+ if (!(tgtport->ops->target_features &
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
+ tgtport->ops->max_hw_queues == 1)
+ return WORK_CPU_UNBOUND;
+
+ /* Simple cpu selection based on qid modulo active cpu count */
+ idx = !qid ? 0 : (qid - 1) % num_active_cpus();
+
+ /* find the n'th active cpu */
+ for (cpu = 0, cnt = 0; ; ) {
+ if (cpu_active(cpu)) {
+ if (cnt == idx)
+ break;
+ cnt++;
+ }
+ cpu = (cpu + 1) % num_possible_cpus();
+ }
+
+ return cpu;
+}
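
To illustrate the spreading above with concrete numbers, here is a standalone sketch (toy stand-ins for cpu_active(), num_active_cpus() and num_possible_cpus(); not part of the patch). As the function checks, it only applies when the LLDD requests NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED and offers more than one hardware queue:

#include <stdbool.h>
#include <stdio.h>

/* toy CPU mask: cpus 0, 2, 3, 5, 7 are active */
static const bool active[8] = { 1, 0, 1, 1, 0, 1, 0, 1 };
#define NUM_POSSIBLE 8
#define NUM_ACTIVE   5

static int queue_to_cpu(int qid)
{
	int cpu, cnt, idx;

	idx = !qid ? 0 : (qid - 1) % NUM_ACTIVE;

	/* find the idx'th active cpu, wrapping over the possible cpus */
	for (cpu = 0, cnt = 0; ; ) {
		if (active[cpu]) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % NUM_POSSIBLE;
	}
	return cpu;
}

int main(void)
{
	int qid;

	/* qid 0 -> cpu 0, qid 1 -> cpu 0, qid 2 -> cpu 2, qid 3 -> cpu 3,
	 * qid 4 -> cpu 5, qid 5 -> cpu 7, qid 6 -> cpu 0 (wraps around)
	 */
	for (qid = 0; qid <= 6; qid++)
		printf("qid %d -> cpu %d\n", qid, queue_to_cpu(qid));
	return 0;
}
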
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ u16 qid, u16 sqsize)
+{
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ int ret;
+
+ if (qid >= NVMET_NR_QUEUES)
+ return NULL;
+
+ queue = kzalloc((sizeof(*queue) +
+ (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
+ GFP_KERNEL);
+ if (!queue)
+ return NULL;
+
+ if (!nvmet_fc_tgt_a_get(assoc))
+ goto out_free_queue;
+
+ queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ assoc->tgtport->fc_target_port.port_num,
+ assoc->a_id, qid);
+ if (!queue->work_q)
+ goto out_a_put;
+
+ queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
+ queue->qid = qid;
+ queue->sqsize = sqsize;
+ queue->assoc = assoc;
+ queue->port = assoc->tgtport->port;
+ queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
+ INIT_LIST_HEAD(&queue->fod_list);
+ atomic_set(&queue->connected, 0);
+ atomic_set(&queue->sqtail, 0);
+ atomic_set(&queue->rsn, 1);
+ atomic_set(&queue->zrspcnt, 0);
+ spin_lock_init(&queue->qlock);
+ kref_init(&queue->ref);
+
+ nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_fail_iodlist;
+
+ WARN_ON(assoc->queues[qid]);
+ spin_lock_irqsave(&assoc->tgtport->lock, flags);
+ assoc->queues[qid] = queue;
+ spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+
+ return queue;
+
+out_fail_iodlist:
+ nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ destroy_workqueue(queue->work_q);
+out_a_put:
+ nvmet_fc_tgt_a_put(assoc);
+out_free_queue:
+ kfree(queue);
+ return NULL;
+}
+
+
+static void
+nvmet_fc_tgt_queue_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(ref, struct nvmet_fc_tgt_queue, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
+ queue->assoc->queues[queue->qid] = NULL;
+ spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+
+ nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+ nvmet_fc_tgt_a_put(queue->assoc);
+
+ destroy_workqueue(queue->work_q);
+
+ kfree(queue);
+}
+
+static void
+nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
+{
+ kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
+}
+
+static int
+nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
+{
+ return kref_get_unless_zero(&queue->ref);
+}
+
+
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ int ret;
+
+ fcpreq->op = NVMET_FCOP_ABORT;
+ fcpreq->offset = 0;
+ fcpreq->timeout = 0;
+ fcpreq->transfer_length = 0;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->sg_cnt = 0;
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
+ if (ret)
+ /* should never reach here !! */
+ WARN_ON(1);
+}
+
+
+static void
+nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ unsigned long flags;
+ int i;
+ bool disconnect;
+
+ disconnect = atomic_xchg(&queue->connected, 0);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ /* abort outstanding io's */
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->active) {
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ spin_unlock(&fod->flock);
+ }
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ flush_workqueue(queue->work_q);
+
+ if (disconnect)
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_fc_tgt_q_put(queue);
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ u64 connection_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ u64 association_id = nvmet_fc_getassociationid(connection_id);
+ u16 qid = nvmet_fc_getqueueid(connection_id);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ queue = assoc->queues[qid];
+ if (queue &&
+ (!atomic_read(&queue->connected) ||
+ !nvmet_fc_tgt_q_get(queue)))
+ queue = NULL;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return queue;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return NULL;
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ unsigned long flags;
+ u64 ran;
+ int idx;
+ bool needrandom = true;
+
+ assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ if (!assoc)
+ return NULL;
+
+ idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0)
+ goto out_free_assoc;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ goto out_ida_put;
+
+ assoc->tgtport = tgtport;
+ assoc->a_id = idx;
+ INIT_LIST_HEAD(&assoc->a_list);
+ kref_init(&assoc->ref);
+
+ while (needrandom) {
+ get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
+ ran = ran << BYTES_FOR_QID_SHIFT;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ needrandom = false;
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+ if (ran == tmpassoc->association_id) {
+ needrandom = true;
+ break;
+ }
+ if (!needrandom) {
+ assoc->association_id = ran;
+ list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ }
+
+ return assoc;
+
+out_ida_put:
+ ida_simple_remove(&tgtport->assoc_cnt, idx);
+out_free_assoc:
+ kfree(assoc);
+ return NULL;
+}
+
+static void
+nvmet_fc_target_assoc_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(ref, struct nvmet_fc_tgt_assoc, ref);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ kfree(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
+{
+ kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
+}
+
+static int
+nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
+{
+ return kref_get_unless_zero(&assoc->ref);
+}
+
+static void
+nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+ queue = assoc->queues[i];
+ if (queue) {
+ if (!nvmet_fc_tgt_q_get(queue))
+ continue;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
+ spin_lock_irqsave(&tgtport->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
+ u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_assoc *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ ret = assoc;
+ nvmet_fc_tgt_a_get(assoc);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return ret;
+}
+
+
+/**
+ * nvmet_fc_register_targetport - transport entry point called by an
+ * LLDD to register the existence of a local
+ * NVME subsystem FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a target port pointer. Upon success, the routine
+ * will allocate an nvmet_fc_target_port structure and place its
+ * address in the target port pointer. Upon failure, the target
+ * port pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ struct nvmet_fc_target_template *template,
+ struct device *dev,
+ struct nvmet_fc_target_port **portptr)
+{
+ struct nvmet_fc_tgtport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->xmt_ls_rsp || !template->fcp_op ||
+ !template->targetport_delete ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_regtgt_failed;
+ }
+
+ newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_regtgt_failed;
+ }
+
+ idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (!get_device(dev) && dev) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ newrec->fc_target_port.node_name = pinfo->node_name;
+ newrec->fc_target_port.port_name = pinfo->port_name;
+ newrec->fc_target_port.private = &newrec[1];
+ newrec->fc_target_port.port_id = pinfo->port_id;
+ newrec->fc_target_port.port_num = idx;
+ INIT_LIST_HEAD(&newrec->tgt_list);
+ newrec->dev = dev;
+ newrec->ops = template;
+ spin_lock_init(&newrec->lock);
+ INIT_LIST_HEAD(&newrec->ls_list);
+ INIT_LIST_HEAD(&newrec->ls_busylist);
+ INIT_LIST_HEAD(&newrec->assoc_list);
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_free_newrec;
+ }
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ *portptr = &newrec->fc_target_port;
+ return 0;
+
+out_free_newrec:
+ put_device(dev);
+out_ida_put:
+ ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_regtgt_failed:
+ *portptr = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
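
For orientation, a minimal, hedged sketch of how a low-level driver might call this entry point. The callback prototypes and template fields are inferred from the validation checks and call sites in this patch (the authoritative definitions live in linux/nvme-fc-driver.h); every lldd_* name and every numeric value below is a placeholder, not a real driver:

/* hypothetical LLDD callbacks; prototypes inferred from the call sites in fc.c */
static int lldd_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_ls_req *lsreq)
{
	/* hand the prebuilt LS response to the hardware, later call lsreq->done() */
	return 0;
}

static int lldd_fcp_op(struct nvmet_fc_target_port *tgtport,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* start the requested FCP data/response operation, later call fcpreq->done() */
	return 0;
}

static void lldd_targetport_delete(struct nvmet_fc_target_port *tgtport)
{
	/* last reference dropped: release LLDD resources tied to this port */
}

static struct nvmet_fc_target_template lldd_tgt_template = {
	.targetport_delete	= lldd_targetport_delete,
	.xmt_ls_rsp		= lldd_xmt_ls_rsp,
	.fcp_op			= lldd_fcp_op,
	.max_hw_queues		= 4,		/* placeholder values */
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_priv_sz		= 0,
};

static int lldd_register_one_port(struct device *dev, u64 wwnn, u64 wwpn)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_id	= 0x010200,	/* placeholder FC D_ID */
	};
	struct nvmet_fc_target_port *targetport;

	/* a real LLDD would keep targetport around for the later unregister call */
	return nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
					    dev, &targetport);
}
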
+
+
+static void
+nvmet_fc_free_tgtport(struct kref *ref)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(ref, struct nvmet_fc_tgtport, ref);
+ struct device *dev = tgtport->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ nvmet_fc_free_ls_iodlist(tgtport);
+
+ /* let the LLDD know we've finished tearing it down */
+ tgtport->ops->targetport_delete(&tgtport->fc_target_port);
+
+ ida_simple_remove(&nvmet_fc_tgtport_cnt,
+ tgtport->fc_target_port.port_num);
+
+ ida_destroy(&tgtport->assoc_cnt);
+
+ kfree(tgtport);
+
+ put_device(dev);
+}
+
+static void
+nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
+{
+ kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
+}
+
+static int
+nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
+{
+ return kref_get_unless_zero(&tgtport->ref);
+}
+
+static void
+__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_tgt_assoc *assoc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry_safe(assoc, next,
+ &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+ spin_lock_irqsave(&tgtport->lock, flags);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+/*
+ * nvmet layer has called to terminate an association
+ */
+static void
+nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_fc_tgtport *tgtport, *next;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ bool found_ctrl = false;
+
+ /* this is a bit ugly, but don't want to make locks layered */
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
+ tgt_list) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ queue = assoc->queues[0];
+ if (queue && queue->nvme_sq.ctrl == ctrl) {
+ if (nvmet_fc_tgt_a_get(assoc))
+ found_ctrl = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+ return;
+ }
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_unregister_targetport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered local NVME subsystem FC port.
+ * @target_port: pointer to the (registered) target port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+
+static void
+nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static int
+nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_NO_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "No Connection",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+};
+
+static void
+nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_assoc_rqst *rqst =
+ (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_cr_assoc_acc *acc =
+ (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
+ ret = VERR_CR_ASSOC_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_rqst)))
+ ret = VERR_CR_ASSOC_RQST_LEN;
+ else if (rqst->assoc_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
+ ret = VERR_CR_ASSOC_CMD;
+ else if (rqst->assoc_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
+ ret = VERR_CR_ASSOC_CMD_LEN;
+ else if (!rqst->assoc_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->assoc_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new association w/ admin queue */
+ iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+ if (!iod->assoc)
+ ret = VERR_ASSOC_ALLOC_FAIL;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+ be16_to_cpu(rqst->assoc_cmd.sqsize));
+ if (!queue)
+ ret = VERR_QUEUE_ALLOC_FAIL;
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Association LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)),
+ FCNVME_LS_CREATE_ASSOCIATION);
+ acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ acc->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ acc->associd.association_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id = acc->associd.association_id;
+}
+
+static void
+nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_conn_rqst *rqst =
+ (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_cr_conn_acc *acc =
+ (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
+ ret = VERR_CR_CONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_conn_rqst)))
+ ret = VERR_CR_CONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->connect_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
+ ret = VERR_CR_CONN_CMD;
+ else if (rqst->connect_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
+ ret = VERR_CR_CONN_CMD_LEN;
+ else if (!rqst->connect_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->connect_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new io queue */
+ iod->assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ if (!iod->assoc)
+ ret = VERR_NO_ASSOC;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid),
+ be16_to_cpu(rqst->connect_cmd.sqsize));
+ if (!queue)
+ ret = VERR_QUEUE_ALLOC_FAIL;
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Connection LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ ELS_RJT_PROT : ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
+ FCNVME_LS_CREATE_CONNECTION);
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid)));
+}
+
+static void
+nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_disconnect_rqst *rqst =
+ (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_disconnect_acc *acc =
+ (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_tgt_assoc *assoc;
+ int ret = 0;
+ bool del_assoc = false;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
+ (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+ ret = VERR_DISCONN_SCOPE;
+ else {
+ /* match an active association */
+ assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ iod->assoc = assoc;
+ if (!assoc)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ? ELS_RJT_PROT : ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_acc)),
+ FCNVME_LS_DISCONNECT);
+
+
+ if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(rqst->discon_cmd.id));
+ if (queue) {
+ int qid = queue->qid;
+
+ nvmet_fc_delete_target_queue(queue);
+
+ /* release the get taken by find_target_queue */
+ nvmet_fc_tgt_q_put(queue);
+
+ /* tear association down if admin queue terminated */
+ if (!qid)
+ del_assoc = true;
+ }
+ }
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(iod->assoc);
+
+ if (del_assoc)
+ nvmet_fc_delete_target_assoc(iod->assoc);
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+
+static void
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+{
+ struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ nvmet_fc_free_ls_iod(tgtport, iod);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ int ret;
+
+ fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+ if (ret)
+ nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLDD
+ */
+static void
+nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_rqst_w0 *w0 =
+ (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+
+ iod->lsreq->nvmet_fc_private = iod;
+ iod->lsreq->rspbuf = iod->rspbuf;
+ iod->lsreq->rspdma = iod->rspdma;
+ iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+ /* Be defensive: handlers will later set this to a valid length */
+ iod->lsreq->rsplen = 0;
+
+ iod->assoc = NULL;
+
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ /* Creates Association and initial Admin Queue/Connection */
+ nvmet_fc_ls_create_association(tgtport, iod);
+ break;
+ case FCNVME_LS_CREATE_CONNECTION:
+ /* Creates an IO Queue/Connection */
+ nvmet_fc_ls_create_connection(tgtport, iod);
+ break;
+ case FCNVME_LS_DISCONNECT:
+ /* Terminate a Queue/Connection or the Association */
+ nvmet_fc_ls_disconnect(tgtport, iod);
+ break;
+ default:
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
+ NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+ ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
+ }
+
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
+}
+
+/*
+ * Work-queue context wrapper for processing a received FC-NVME LS Request
+ */
+static void
+nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvmet_fc_ls_iod *iod =
+ container_of(work, struct nvmet_fc_ls_iod, work);
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ nvmet_fc_handle_ls_rqst(tgtport, iod);
+}
+
+
+/**
+ * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
+ * upon the reception of an NVME LS request.
+ *
+ * The nvmet-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the LS was
+ * received on.
+ * @lsreq: pointer to a lsreq request structure to be used to reference
+ * the exchange corresponding to the LS.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_ls_req *lsreq,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_ls_iod *iod;
+
+ if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+ return -E2BIG;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ iod = nvmet_fc_alloc_ls_iod(tgtport);
+ if (!iod) {
+ nvmet_fc_tgtport_put(tgtport);
+ return -ENOENT;
+ }
+
+ iod->lsreq = lsreq;
+ iod->fcpreq = NULL;
+ memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
+ iod->rqstdatalen = lsreqbuf_len;
+
+ schedule_work(&iod->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
+
+
+/*
+ * **********************
+ * Start of FCP handling
+ * **********************
+ */
+
+static int
+nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int nent;
+ u32 page_len, length;
+ int i = 0;
+
+ length = fod->total_length;
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ sg_init_table(sg, nent);
+
+ while (length) {
+ page_len = min_t(u32, length, PAGE_SIZE);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto out_free_pages;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
+ }
+
+ fod->data_sg = sg;
+ fod->data_sg_cnt = nent;
+ fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ /* note: write from initiator perspective */
+
+ return 0;
+
+out_free_pages:
+ while (i > 0) {
+ i--;
+ __free_page(sg_page(&sg[i]));
+ }
+ kfree(sg);
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+out:
+ return NVME_SC_INTERNAL;
+}
+
+static void
+nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ struct scatterlist *sg;
+ int count;
+
+ if (!fod->data_sg || !fod->data_sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
+ __free_page(sg_page(sg));
+ kfree(fod->data_sg);
+}
+
+
+static bool
+queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
+{
+ u32 sqtail, used;
+
+ /* egad, this is ugly. And sqtail is just a best guess */
+ sqtail = atomic_read(&q->sqtail) % q->sqsize;
+
+ used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
+ return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
+}
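
As a worked example of the arithmetic above (toy numbers, standalone, not part of the patch): with sqsize 32, sqhd 5 and a raw sqtail counter of 66, 29 entries are considered in flight and the queue is treated as at least 90% full:

#include <stdio.h>
#include <stdint.h>

/* same arithmetic as queue_90percent_full(), with toy numbers */
int main(void)
{
	uint32_t sqsize = 32, sqhd = 5, sqtail_counter = 66;
	uint32_t sqtail = sqtail_counter % sqsize;               /* 2  */
	uint32_t used = (sqtail < sqhd) ? (sqtail + sqsize - sqhd)
					: (sqtail - sqhd);       /* 29 */

	/* 290 >= 279, so the queue counts as 90% (or more) full */
	printf("90%% full: %d\n", (used * 10) >= ((sqsize - 1) * 9));
	return 0;
}
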
+
+/*
+ * Prep RSP payload.
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
+ */
+static void
+nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &ersp->cqe;
+ u32 *cqewd = (u32 *)cqe;
+ bool send_ersp = false;
+ u32 rsn, rspcnt, xfr_length;
+
+ if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
+ xfr_length = fod->total_length;
+ else
+ xfr_length = fod->offset;
+
+ /*
+ * check to see if we can send a 0's rsp.
+ * Note: to send a 0's response, the NVME-FC host transport will
+ * recreate the CQE. The host transport knows: sq id, SQHD (last
+ * seen in an ersp), and command_id. Thus it will create a
+ * zero-filled CQE with those known fields filled in. Transport
+ * must send an ersp for any condition where the cqe won't match
+ * this.
+ *
+ * Here are the FC-NVME mandated cases where we must send an ersp:
+ * every N responses, where N=ersp_ratio
+ * force fabric commands to send ersp's (not in FC-NVME but good
+ * practice)
+ * normal cmds: any time status is non-zero, or status is zero
+ * but words 0 or 1 are non-zero.
+ * the SQ is 90% or more full
+ * the cmd is a fused command
+ * transferred data length not equal to cmd iu length
+ */
+ rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
+ if (!(rspcnt % fod->queue->ersp_ratio) ||
+ sqe->opcode == nvme_fabrics_command ||
+ xfr_length != fod->total_length ||
+ (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
+ (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
+ queue_90percent_full(fod->queue, cqe->sq_head))
+ send_ersp = true;
+
+ /* re-set the fields */
+ fod->fcpreq->rspaddr = ersp;
+ fod->fcpreq->rspdma = fod->rspdma;
+
+ if (!send_ersp) {
+ memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
+ fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
+ } else {
+ ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
+ rsn = atomic_inc_return(&fod->queue->rsn);
+ ersp->rsn = cpu_to_be32(rsn);
+ ersp->xfrd_len = cpu_to_be32(xfr_length);
+ fod->fcpreq->rsplen = sizeof(*ersp);
+ }
+
+ fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+}
+
+static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
+
+static void
+nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ int ret;
+
+ fod->fcpreq->op = NVMET_FCOP_RSP;
+ fod->fcpreq->timeout = 0;
+
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret)
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+static void
+nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, u8 op)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct scatterlist *sg, *datasg;
+ u32 tlen, sg_off;
+ int ret;
+
+ fcpreq->op = op;
+ fcpreq->offset = fod->offset;
+ fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+ tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+ (fod->total_length - fod->offset));
+ tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
+ tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
+ * PAGE_SIZE);
+ fcpreq->transfer_length = tlen;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->rsplen = 0;
+
+ fcpreq->sg_cnt = 0;
+
+ datasg = fod->next_sg;
+ sg_off = fod->next_sg_offset;
+
+ for (sg = fcpreq->sg ; tlen; sg++) {
+ *sg = *datasg;
+ if (sg_off) {
+ sg->offset += sg_off;
+ sg->length -= sg_off;
+ sg->dma_address += sg_off;
+ sg_off = 0;
+ }
+ if (tlen < sg->length) {
+ sg->length = tlen;
+ fod->next_sg = datasg;
+ fod->next_sg_offset += tlen;
+ } else if (tlen == sg->length) {
+ fod->next_sg_offset = 0;
+ fod->next_sg = sg_next(datasg);
+ } else {
+ fod->next_sg_offset = 0;
+ datasg = sg_next(datasg);
+ }
+ tlen -= sg->length;
+ fcpreq->sg_cnt++;
+ }
+
+ /*
+ * If the last READDATA request: check if LLDD supports
+ * combined xfr with response.
+ */
+ if ((op == NVMET_FCOP_READDATA) &&
+ ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+ (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
+ fcpreq->op = NVMET_FCOP_READDATA_RSP;
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+ }
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret) {
+ /*
+ * should be ok to set w/o lock as it's in the thread of
+ * execution (not an async timer routine) and doesn't
+ * contend with any clearing action
+ */
+ fod->abort = true;
+
+ if (op == NVMET_FCOP_WRITEDATA)
+ nvmet_req_complete(&fod->req,
+ NVME_SC_FC_TRANSPORT_ERROR);
+ else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+ fcpreq->fcp_error = ret;
+ fcpreq->transferred_length = 0;
+ nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
+ }
+ }
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* if in the middle of an io and we need to tear down */
+ if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ if (fcpreq->fcp_error || abort)
+ nvmet_req_complete(&fod->req, fcpreq->fcp_error);
+
+ return;
+ }
+
+ switch (fcpreq->op) {
+
+ case NVMET_FCOP_WRITEDATA:
+ if (abort || fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ nvmet_req_complete(&fod->req,
+ NVME_SC_FC_TRANSPORT_ERROR);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->total_length) {
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /* data transfer complete, resume with nvmet layer */
+
+ fod->req.execute(&fod->req);
+
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ if (abort || fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* success */
+
+ if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->total_length) {
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* data transfer complete, send response */
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+
+ break;
+
+ case NVMET_FCOP_RSP:
+ case NVMET_FCOP_ABORT:
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ break;
+
+ default:
+ nvmet_fc_free_tgt_pgs(fod);
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ break;
+ }
+}
+
+/*
+ * actual completion handler after execution by the nvmet layer
+ */
+static void
+__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, int status)
+{
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &fod->rspiubuf.cqe;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* if we have a CQE, snoop the last sq_head value */
+ if (!status)
+ fod->queue->sqhd = cqe->sq_head;
+
+ if (abort) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* if an error occurred handling the cmd after initial parsing */
+ if (status) {
+ /* fudge up a failed CQE status for our transport error */
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
+ cqe->sq_id = cpu_to_le16(fod->queue->qid);
+ cqe->command_id = sqe->command_id;
+ cqe->status = cpu_to_le16(status);
+ } else {
+
+ /*
+ * try to push the data even if the SQE status is non-zero.
+ * There may be a status where data still was intended to
+ * be moved
+ */
+ if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+ /* push the data over before sending rsp */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* writes & no data - fall thru */
+ }
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+}
+
+
+static void
+nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
+{
+ struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
+}
+
+
+/*
+ * Actual processing routine for received FC-NVME FCP Requests from the LLDD
+ */
+void
+nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+ int ret;
+
+ /*
+ * Fused commands are currently not supported in the linux
+ * implementation.
+ *
+ * As such, the FC transport implementation does not look for
+ * fused commands, nor does it hold and order delivery to the
+ * upper layer until both commands have arrived, based on csn.
+ */
+
+ fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
+
+ fod->total_length = be32_to_cpu(cmdiu->data_len);
+ if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
+ fod->io_dir = NVMET_FCP_WRITE;
+ if (!nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
+ fod->io_dir = NVMET_FCP_READ;
+ if (nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else {
+ fod->io_dir = NVMET_FCP_NODATA;
+ if (fod->total_length)
+ goto transport_error;
+ }
+
+ fod->req.cmd = &fod->cmdiubuf.sqe;
+ fod->req.rsp = &fod->rspiubuf.cqe;
+ fod->req.port = fod->queue->port;
+
+ /* ensure nvmet handlers will set cmd handler callback */
+ fod->req.execute = NULL;
+
+ /* clear any response payload */
+ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+
+ ret = nvmet_req_init(&fod->req,
+ &fod->queue->nvme_cq,
+ &fod->queue->nvme_sq,
+ &nvmet_fc_tgt_fcp_ops);
+ if (!ret) { /* bad SQE content */
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* keep a running counter of tail position */
+ atomic_inc(&fod->queue->sqtail);
+
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+ if (fod->total_length) {
+ ret = nvmet_fc_alloc_tgt_pgs(fod);
+ if (ret) {
+ nvmet_req_complete(&fod->req, ret);
+ return;
+ }
+ }
+ fod->req.sg = fod->data_sg;
+ fod->req.sg_cnt = fod->data_sg_cnt;
+ fod->offset = 0;
+ fod->next_sg = fod->data_sg;
+ fod->next_sg_offset = 0;
+
+ if (fod->io_dir == NVMET_FCP_WRITE) {
+ /* pull the data over before invoking nvmet layer */
+ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /*
+ * Reads or no data:
+ *
+ * can invoke the nvmet_layer now. If read data, cmd completion will
+ * push the data
+ */
+
+ fod->req.execute(&fod->req);
+
+ return;
+
+transport_error:
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+/*
+ * Work-queue context wrapper for processing a received FC-NVME FCP Request
+ */
+static void
+nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
+{
+ struct nvmet_fc_fcp_iod *fod =
+ container_of(work, struct nvmet_fc_fcp_iod, work);
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+/**
+ * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
+ * upon the reception of an NVME FCP CMD IU.
+ *
+ * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
+ * layer for processing.
+ *
+ * The nvmet-fc layer will copy cmd payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the CMD IU buffer passed in the call.
+ *
+ * If this routine returns an error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ * was received on.
+ * @fcpreq: pointer to a fcpreq request structure to be used to reference
+ * the exchange corresponding to the FCP Exchange.
+ * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
+ * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
+ */
+int
+nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
+ struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_fcp_iod *fod;
+
+ /* validate iu, so the connection id can be used to find the queue */
+ if ((cmdiubuf_len != sizeof(*cmdiu)) ||
+ (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->fc_id != NVME_CMD_FC_ID) ||
+ (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
+ return -EIO;
+
+
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(cmdiu->connection_id));
+ if (!queue)
+ return -ENOTCONN;
+
+ /*
+ * note: reference taken by find_target_queue
+ * After successful fod allocation, the fod will inherit the
+ * ownership of that reference and will remove the reference
+ * when the fod is freed.
+ */
+
+ fod = nvmet_fc_alloc_fcp_iod(queue);
+ if (!fod) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOENT;
+ }
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ /*
+ * Put all admin commands on hw queue id 0. I/O commands are
+ * distributed across the remaining hw queues on a modulo basis.
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
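A minimal sketch of how an LLDD receive path might use the entry point documented above (the lldd_* names and struct layout are hypothetical, not part of this patch):

/* Illustrative sketch only; "lldd_" names are hypothetical. */
struct lldd_exchange {
	struct nvmefc_tgt_fcp_req tgt_fcp_req;
	/* ... LLDD-specific exchange state ... */
};

static void lldd_abort_exchange(struct lldd_exchange *xchg);

static void lldd_handle_fcp_cmd_iu(struct nvmet_fc_target_port *tgtport,
				   struct lldd_exchange *xchg,
				   void *cmdiubuf, u32 cmdiubuf_len)
{
	/* hand the CMD IU to nvmet-fc; the buffer is copied internally */
	int ret = nvmet_fc_rcv_fcp_req(tgtport, &xchg->tgt_fcp_req,
				       cmdiubuf, cmdiubuf_len);

	if (ret)
		/* per the comment block above: abort the exchange on error */
		lldd_abort_exchange(xchg);

	/* cmdiubuf may be freed or reused as soon as this call returns */
}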
+
+enum {
+ FCT_TRADDR_ERR = 0,
+ FCT_TRADDR_WWNN = 1 << 0,
+ FCT_TRADDR_WWPN = 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+ { FCT_TRADDR_WWNN, "nn-%s" },
+ { FCT_TRADDR_WWPN, "pn-%s" },
+ { FCT_TRADDR_ERR, NULL }
+};
+
+static int
+nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, traddr_opt_tokens, args);
+ switch (token) {
+ case FCT_TRADDR_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->nn = token64;
+ break;
+ case FCT_TRADDR_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->pn = token64;
+ break;
+ default:
+ pr_warn("unknown traddr token or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ kfree(options);
+ return ret;
+}
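For illustration, a traddr string in the form accepted by this parser (example WWN values, comma-separated to match the strsep() above):

/* Example only: a traddr string and the values the parser would yield. */
static const char example_traddr[] =
	"nn-0x10000090fac7893f,pn-0x20000090fac7893f";
/*
 * nvmet_fc_parse_traddr() would return 0 and fill:
 *	traddr.nn == 0x10000090fac7893fULL
 *	traddr.pn == 0x20000090fac7893fULL
 */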
+
+static int
+nvmet_fc_add_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_traddr traddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ /* validate the address info */
+ if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
+ (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
+ return -EINVAL;
+
+ /* map the traddr address info to a target port */
+
+ ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+ if (ret)
+ return ret;
+
+ ret = -ENXIO;
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
+ if ((tgtport->fc_target_port.node_name == traddr.nn) &&
+ (tgtport->fc_target_port.port_name == traddr.pn)) {
+ /* an FC port can be bound to only one nvmet port id */
+ if (!tgtport->port) {
+ tgtport->port = port;
+ port->priv = tgtport;
+ ret = 0;
+ } else
+ ret = -EALREADY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+ return ret;
+}
+
+static void
+nvmet_fc_remove_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_tgtport *tgtport = port->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (tgtport->port == port) {
+ nvmet_fc_tgtport_put(tgtport);
+ tgtport->port = NULL;
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_FC,
+ .msdbd = 1,
+ .add_port = nvmet_fc_add_port,
+ .remove_port = nvmet_fc_remove_port,
+ .queue_response = nvmet_fc_fcp_nvme_cmd_done,
+ .delete_ctrl = nvmet_fc_delete_ctrl,
+};
+
+static int __init nvmet_fc_init_module(void)
+{
+ return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+}
+
+static void __exit nvmet_fc_exit_module(void)
+{
+ /* sanity check - all targetports should have been removed */
+ if (!list_empty(&nvmet_fc_target_list))
+ pr_warn("%s: targetport list not empty\n", __func__);
+
+ nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
+
+ ida_destroy(&nvmet_fc_tgtport_cnt);
+}
+
+module_init(nvmet_fc_init_module);
+module_exit(nvmet_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
new file mode 100644
index 000000000000..bcb8ebeb01c5
--- /dev/null
+++ b/drivers/nvme/target/fcloop.c
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+
+#include "../host/nvme.h"
+#include "../target/nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_WWNN = 1 << 0,
+ NVMF_OPT_WWPN = 1 << 1,
+ NVMF_OPT_ROLES = 1 << 2,
+ NVMF_OPT_FCADDR = 1 << 3,
+ NVMF_OPT_LPWWNN = 1 << 4,
+ NVMF_OPT_LPWWPN = 1 << 5,
+};
+
+struct fcloop_ctrl_options {
+ int mask;
+ u64 wwnn;
+ u64 wwpn;
+ u32 roles;
+ u32 fcaddr;
+ u64 lpwwnn;
+ u64 lpwwpn;
+};
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_WWNN, "wwnn=%s" },
+ { NVMF_OPT_WWPN, "wwpn=%s" },
+ { NVMF_OPT_ROLES, "roles=%d" },
+ { NVMF_OPT_FCADDR, "fcaddr=%x" },
+ { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
+ { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
+
+static int
+fcloop_parse_options(struct fcloop_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwnn = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwpn = token64;
+ break;
+ case NVMF_OPT_ROLES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->roles = token;
+ break;
+ case NVMF_OPT_FCADDR:
+ if (match_hex(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->fcaddr = token;
+ break;
+ case NVMF_OPT_LPWWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwnn = token64;
+ break;
+ case NVMF_OPT_LPWWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwpn = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+ return ret;
+}
+
+
+static int
+fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ *nname = -1;
+ *pname = -1;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *nname = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *pname = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+
+ if (!ret) {
+ if (*nname == -1)
+ return -EINVAL;
+ if (*pname == -1)
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+
+#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
+ NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
+ NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+
+static DEFINE_SPINLOCK(fcloop_lock);
+static LIST_HEAD(fcloop_lports);
+static LIST_HEAD(fcloop_nports);
+
+struct fcloop_lport {
+ struct nvme_fc_local_port *localport;
+ struct list_head lport_list;
+ struct completion unreg_done;
+};
+
+struct fcloop_rport {
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+};
+
+struct fcloop_tport {
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+};
+
+struct fcloop_nport {
+ struct fcloop_rport *rport;
+ struct fcloop_tport *tport;
+ struct fcloop_lport *lport;
+ struct list_head nport_list;
+ struct kref ref;
+ struct completion rport_unreg_done;
+ struct completion tport_unreg_done;
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+};
+
+struct fcloop_lsreq {
+ struct fcloop_tport *tport;
+ struct nvmefc_ls_req *lsreq;
+ struct work_struct work;
+ struct nvmefc_tgt_ls_req tgt_ls_req;
+ int status;
+};
+
+struct fcloop_fcpreq {
+ struct fcloop_tport *tport;
+ struct nvmefc_fcp_req *fcpreq;
+ u16 status;
+ struct work_struct work;
+ struct nvmefc_tgt_fcp_req tgt_fcp_req;
+};
+
+
+static inline struct fcloop_lsreq *
+tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+ return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+}
+
+static inline struct fcloop_fcpreq *
+tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
+}
+
+
+static int
+fcloop_create_queue(struct nvme_fc_local_port *localport,
+ unsigned int qidx, u16 qsize,
+ void **handle)
+{
+ *handle = localport;
+ return 0;
+}
+
+static void
+fcloop_delete_queue(struct nvme_fc_local_port *localport,
+ unsigned int idx, void *handle)
+{
+}
+
+
+/*
+ * Transmit of the LS RSP is done (e.g. buffers are all set up); call
+ * back up the initiator "done" flows.
+ */
+static void
+fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+{
+ struct fcloop_lsreq *tls_req =
+ container_of(work, struct fcloop_lsreq, work);
+ struct fcloop_tport *tport = tls_req->tport;
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+ if (tport->remoteport)
+ lsreq->done(lsreq, tls_req->status);
+}
+
+static int
+fcloop_ls_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_rport *rport = remoteport->private;
+ int ret = 0;
+
+ tls_req->lsreq = lsreq;
+ INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
+ schedule_work(&tls_req->work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ tls_req->tport = rport->targetport->private;
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+ struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+ struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+ memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
+ ((lsreq->rsplen < tgt_lsreq->rsplen) ?
+ lsreq->rsplen : tgt_lsreq->rsplen));
+ tgt_lsreq->done(tgt_lsreq);
+
+ schedule_work(&tls_req->work);
+
+ return 0;
+}
+
+/*
+ * FCP I/O operation is done; call back up the initiator "done" flows.
+ */
+static void
+fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, work);
+ struct fcloop_tport *tport = tfcp_req->tport;
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+
+ if (tport->remoteport) {
+ fcpreq->status = tfcp_req->status;
+ fcpreq->done(fcpreq);
+ }
+}
+
+
+static int
+fcloop_fcp_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = fcpreq->private;
+ struct fcloop_rport *rport = remoteport->private;
+ int ret = 0;
+
+ INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+
+ if (!rport->targetport) {
+ tfcp_req->status = NVME_SC_FC_TRANSPORT_ERROR;
+ schedule_work(&tfcp_req->work);
+ return ret;
+ }
+
+ tfcp_req->fcpreq = fcpreq;
+ tfcp_req->tport = rport->targetport->private;
+
+ ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+
+ return ret;
+}
+
+static void
+fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
+ struct scatterlist *io_sg, u32 offset, u32 length)
+{
+ void *data_p, *io_p;
+ u32 data_len, io_len, tlen;
+
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+
+ for ( ; offset; ) {
+ tlen = min_t(u32, offset, io_len);
+ offset -= tlen;
+ io_len -= tlen;
+ if (!io_len) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+ }
+
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+
+ for ( ; length; ) {
+ tlen = min_t(u32, io_len, data_len);
+ tlen = min_t(u32, tlen, length);
+
+ if (op == NVMET_FCOP_WRITEDATA)
+ memcpy(data_p, io_p, tlen);
+ else
+ memcpy(io_p, data_p, tlen);
+
+ length -= tlen;
+
+ io_len -= tlen;
+ if ((!io_len) && (length)) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+
+ data_len -= tlen;
+ if ((!data_len) && (length)) {
+ data_sg = sg_next(data_sg);
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+ } else
+ data_p += tlen;
+ }
+}
+
+static int
+fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ u32 rsplen = 0, xfrlen = 0;
+ int fcp_err = 0;
+ u8 op = tgt_fcpreq->op;
+
+ switch (op) {
+ case NVMET_FCOP_WRITEDATA:
+ xfrlen = tgt_fcpreq->transfer_length;
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+ tgt_fcpreq->offset, xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ xfrlen = tgt_fcpreq->transfer_length;
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+ tgt_fcpreq->offset, xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ if (op == NVMET_FCOP_READDATA)
+ break;
+
+ /* Fall-Thru to RSP handling */
+
+ case NVMET_FCOP_RSP:
+ rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+ fcpreq->rsplen : tgt_fcpreq->rsplen);
+ memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+ if (rsplen < tgt_fcpreq->rsplen)
+ fcp_err = -E2BIG;
+ fcpreq->rcv_rsplen = rsplen;
+ fcpreq->status = 0;
+ tfcp_req->status = 0;
+ break;
+
+ case NVMET_FCOP_ABORT:
+ tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+ break;
+
+ default:
+ fcp_err = -EINVAL;
+ break;
+ }
+
+ tgt_fcpreq->transferred_length = xfrlen;
+ tgt_fcpreq->fcp_error = fcp_err;
+ tgt_fcpreq->done(tgt_fcpreq);
+
+ if ((!fcp_err) && (op == NVMET_FCOP_RSP ||
+ op == NVMET_FCOP_READDATA_RSP ||
+ op == NVMET_FCOP_ABORT))
+ schedule_work(&tfcp_req->work);
+
+ return 0;
+}
+
+static void
+fcloop_ls_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+}
+
+static void
+fcloop_localport_delete(struct nvme_fc_local_port *localport)
+{
+ struct fcloop_lport *lport = localport->private;
+
+ /* wake up any threads waiting for the unregister to complete */
+ complete(&lport->unreg_done);
+}
+
+static void
+fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+ struct fcloop_rport *rport = remoteport->private;
+
+ /* wake up any threads waiting for the unregister to complete */
+ complete(&rport->nport->rport_unreg_done);
+}
+
+static void
+fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+ struct fcloop_tport *tport = targetport->private;
+
+ /* wake up any threads waiting for the unregister to complete */
+ complete(&tport->nport->tport_unreg_done);
+}
+
+#define FCLOOP_HW_QUEUES 4
+#define FCLOOP_SGL_SEGS 256
+#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
+
+struct nvme_fc_port_template fctemplate = {
+ .localport_delete = fcloop_localport_delete,
+ .remoteport_delete = fcloop_remoteport_delete,
+ .create_queue = fcloop_create_queue,
+ .delete_queue = fcloop_delete_queue,
+ .ls_req = fcloop_ls_req,
+ .fcp_io = fcloop_fcp_req,
+ .ls_abort = fcloop_ls_abort,
+ .fcp_abort = fcloop_fcp_abort,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* sizes of additional private data for data structures */
+ .local_priv_sz = sizeof(struct fcloop_lport),
+ .remote_priv_sz = sizeof(struct fcloop_rport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
+ .fcprqst_priv_sz = sizeof(struct fcloop_fcpreq),
+};
+
+struct nvmet_fc_target_template tgttemplate = {
+ .targetport_delete = fcloop_targetport_delete,
+ .xmt_ls_rsp = fcloop_xmt_ls_rsp,
+ .fcp_op = fcloop_fcp_op,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* optional features */
+ .target_features = NVMET_FCTGTFEAT_READDATA_RSP |
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED,
+ /* sizes of additional private data for data structures */
+ .target_priv_sz = sizeof(struct fcloop_tport),
+};
+
+static ssize_t
+fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_port_info pinfo;
+ struct fcloop_ctrl_options *opts;
+ struct nvme_fc_local_port *localport;
+ struct fcloop_lport *lport;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* were all required options supplied? */
+ if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ pinfo.node_name = opts->wwnn;
+ pinfo.port_name = opts->wwpn;
+ pinfo.port_role = opts->roles;
+ pinfo.port_id = opts->fcaddr;
+
+ ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+ if (!ret) {
+ unsigned long flags;
+
+ /* success */
+ lport = localport->private;
+ lport->localport = localport;
+ INIT_LIST_HEAD(&lport->lport_list);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_add_tail(&lport->lport_list, &fcloop_lports);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ /* mark all of the input buffer consumed */
+ ret = count;
+ }
+
+out_free_opts:
+ kfree(opts);
+ return ret ? ret : count;
+}
+
+
+static void
+__unlink_local_port(struct fcloop_lport *lport)
+{
+ list_del(&lport->lport_list);
+}
+
+static int
+__wait_localport_unreg(struct fcloop_lport *lport)
+{
+ int ret;
+
+ init_completion(&lport->unreg_done);
+
+ ret = nvme_fc_unregister_localport(lport->localport);
+
+ wait_for_completion(&lport->unreg_done);
+
+ return ret;
+}
+
+
+static ssize_t
+fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_lport *tlport, *lport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tlport, &fcloop_lports, lport_list) {
+ if (tlport->localport->node_name == nodename &&
+ tlport->localport->port_name == portname) {
+ lport = tlport;
+ __unlink_local_port(lport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!lport)
+ return -ENOENT;
+
+ ret = __wait_localport_unreg(lport);
+
+ return ret ? ret : count;
+}
+
+static void
+fcloop_nport_free(struct kref *ref)
+{
+ struct fcloop_nport *nport =
+ container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+ kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+ return kref_get_unless_zero(&nport->ref);
+}
+
+static struct fcloop_nport *
+fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
+{
+ struct fcloop_nport *newnport, *nport = NULL;
+ struct fcloop_lport *tmplport, *lport = NULL;
+ struct fcloop_ctrl_options *opts;
+ unsigned long flags;
+ u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return NULL;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* were all required options supplied? */
+ if ((opts->mask & opts_mask) != opts_mask) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
+ if (!newnport)
+ goto out_free_opts;
+
+ INIT_LIST_HEAD(&newnport->nport_list);
+ newnport->node_name = opts->wwnn;
+ newnport->port_name = opts->wwpn;
+ if (opts->mask & NVMF_OPT_ROLES)
+ newnport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ newnport->port_id = opts->fcaddr;
+ kref_init(&newnport->ref);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
+ if (tmplport->localport->node_name == opts->wwnn &&
+ tmplport->localport->port_name == opts->wwpn)
+ goto out_invalid_opts;
+
+ if (tmplport->localport->node_name == opts->lpwwnn &&
+ tmplport->localport->port_name == opts->lpwwpn)
+ lport = tmplport;
+ }
+
+ if (remoteport) {
+ if (!lport)
+ goto out_invalid_opts;
+ newnport->lport = lport;
+ }
+
+ list_for_each_entry(nport, &fcloop_nports, nport_list) {
+ if (nport->node_name == opts->wwnn &&
+ nport->port_name == opts->wwpn) {
+ if ((remoteport && nport->rport) ||
+ (!remoteport && nport->tport)) {
+ nport = NULL;
+ goto out_invalid_opts;
+ }
+
+ fcloop_nport_get(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (remoteport)
+ nport->lport = lport;
+ if (opts->mask & NVMF_OPT_ROLES)
+ nport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ nport->port_id = opts->fcaddr;
+ goto out_free_newnport;
+ }
+ }
+
+ list_add_tail(&newnport->nport_list, &fcloop_nports);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(opts);
+ return newnport;
+
+out_invalid_opts:
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+out_free_newnport:
+ kfree(newnport);
+out_free_opts:
+ kfree(opts);
+ return nport;
+}
+
+static ssize_t
+fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_rport *rport;
+ struct nvme_fc_port_info pinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, true);
+ if (!nport)
+ return -EIO;
+
+ pinfo.node_name = nport->node_name;
+ pinfo.port_name = nport->port_name;
+ pinfo.port_role = nport->port_role;
+ pinfo.port_id = nport->port_id;
+
+ ret = nvme_fc_register_remoteport(nport->lport->localport,
+ &pinfo, &remoteport);
+ if (ret || !remoteport) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ rport = remoteport->private;
+ rport->remoteport = remoteport;
+ rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
+ if (nport->tport) {
+ nport->tport->remoteport = remoteport;
+ nport->tport->lport = nport->lport;
+ }
+ rport->nport = nport;
+ rport->lport = nport->lport;
+ nport->rport = rport;
+
+ return ret ? ret : count;
+}
+
+
+static struct fcloop_rport *
+__unlink_remote_port(struct fcloop_nport *nport)
+{
+ struct fcloop_rport *rport = nport->rport;
+
+ if (rport && nport->tport)
+ nport->tport->remoteport = NULL;
+ nport->rport = NULL;
+
+ return rport;
+}
+
+static int
+__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+{
+ int ret;
+
+ if (!rport)
+ return -EALREADY;
+
+ init_completion(&nport->rport_unreg_done);
+
+ ret = nvme_fc_unregister_remoteport(rport->remoteport);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&nport->rport_unreg_done);
+
+ fcloop_nport_put(nport);
+
+ return ret;
+}
+
+static ssize_t
+fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+ struct fcloop_rport *rport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->rport) {
+ nport = tmpport;
+ rport = __unlink_remote_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __wait_remoteport_unreg(nport, rport);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
+ struct nvmet_fc_port_info tinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, false);
+ if (!nport)
+ return -EIO;
+
+ tinfo.node_name = nport->node_name;
+ tinfo.port_name = nport->port_name;
+ tinfo.port_id = nport->port_id;
+
+ ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
+ &targetport);
+ if (ret) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ tport = targetport->private;
+ tport->targetport = targetport;
+ tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
+ if (nport->rport)
+ nport->rport->targetport = targetport;
+ tport->nport = nport;
+ tport->lport = nport->lport;
+ nport->tport = tport;
+
+ return ret ? ret : count;
+}
+
+
+static struct fcloop_tport *
+__unlink_target_port(struct fcloop_nport *nport)
+{
+ struct fcloop_tport *tport = nport->tport;
+
+ if (tport && nport->rport)
+ nport->rport->targetport = NULL;
+ nport->tport = NULL;
+
+ return tport;
+}
+
+static int
+__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+{
+ int ret;
+
+ if (!tport)
+ return -EALREADY;
+
+ init_completion(&nport->tport_unreg_done);
+
+ ret = nvmet_fc_unregister_targetport(tport->targetport);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&nport->tport_unreg_done);
+
+ fcloop_nport_put(nport);
+
+ return ret;
+}
+
+static ssize_t
+fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+ struct fcloop_tport *tport;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->tport) {
+ nport = tmpport;
+ tport = __unlink_target_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __wait_targetport_unreg(nport, tport);
+
+ return ret ? ret : count;
+}
+
+
+static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
+static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
+static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
+static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
+static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
+static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+
+static struct attribute *fcloop_dev_attrs[] = {
+ &dev_attr_add_local_port.attr,
+ &dev_attr_del_local_port.attr,
+ &dev_attr_add_remote_port.attr,
+ &dev_attr_del_remote_port.attr,
+ &dev_attr_add_target_port.attr,
+ &dev_attr_del_target_port.attr,
+ NULL
+};
+
+static struct attribute_group fcloop_dev_attrs_group = {
+ .attrs = fcloop_dev_attrs,
+};
+
+static const struct attribute_group *fcloop_dev_attr_groups[] = {
+ &fcloop_dev_attrs_group,
+ NULL,
+};
+
+static struct class *fcloop_class;
+static struct device *fcloop_device;
+
+
+static int __init fcloop_init(void)
+{
+ int ret;
+
+ fcloop_class = class_create(THIS_MODULE, "fcloop");
+ if (IS_ERR(fcloop_class)) {
+ pr_err("couldn't register class fcloop\n");
+ ret = PTR_ERR(fcloop_class);
+ return ret;
+ }
+
+ fcloop_device = device_create_with_groups(
+ fcloop_class, NULL, MKDEV(0, 0), NULL,
+ fcloop_dev_attr_groups, "ctl");
+ if (IS_ERR(fcloop_device)) {
+ pr_err("couldn't create ctl device!\n");
+ ret = PTR_ERR(fcloop_device);
+ goto out_destroy_class;
+ }
+
+ get_device(fcloop_device);
+
+ return 0;
+
+out_destroy_class:
+ class_destroy(fcloop_class);
+ return ret;
+}
+
+static void __exit fcloop_exit(void)
+{
+ struct fcloop_lport *lport;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
+ struct fcloop_rport *rport;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ for (;;) {
+ nport = list_first_entry_or_null(&fcloop_nports,
+ typeof(*nport), nport_list);
+ if (!nport)
+ break;
+
+ tport = __unlink_target_port(nport);
+ rport = __unlink_remote_port(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __wait_targetport_unreg(nport, tport);
+ if (ret)
+ pr_warn("%s: Failed deleting target port\n", __func__);
+
+ ret = __wait_remoteport_unreg(nport, rport);
+ if (ret)
+ pr_warn("%s: Failed deleting remote port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ for (;;) {
+ lport = list_first_entry_or_null(&fcloop_lports,
+ typeof(*lport), lport_list);
+ if (!lport)
+ break;
+
+ __unlink_local_port(lport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __wait_localport_unreg(lport);
+ if (ret)
+ pr_warn("%s: Failed deleting local port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ put_device(fcloop_device);
+
+ device_destroy(fcloop_class, MKDEV(0, 0));
+ class_destroy(fcloop_class);
+}
+
+module_init(fcloop_init);
+module_exit(fcloop_exit);
+
+MODULE_LICENSE("GPL v2");
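A rough userspace sketch of exercising the fcloop control interface defined above; the sysfs path is inferred from the "fcloop" class and "ctl" device names registered in fcloop_init(), the option keywords follow the opt_tokens table, and the WWN values are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write one option string to a fcloop control attribute. */
static int fcloop_ctl_write(const char *attr, const char *opts)
{
	char path[128];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/class/fcloop/ctl/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, opts, strlen(opts));
	close(fd);
	return n < 0 ? -1 : 0;
}

/*
 * Example sequence (placeholder WWNs; required keys per LPORT_OPTS,
 * TGTPORT_OPTS and RPORT_OPTS above):
 *	fcloop_ctl_write("add_local_port",
 *		"wwnn=0x10000090fac7893f,wwpn=0x20000090fac7893f");
 *	fcloop_ctl_write("add_target_port",
 *		"wwnn=0x10001090fac7893f,wwpn=0x20001090fac7893f");
 *	fcloop_ctl_write("add_remote_port",
 *		"wwnn=0x10001090fac7893f,wwpn=0x20001090fac7893f,"
 *		"lpwwnn=0x10000090fac7893f,lpwwpn=0x20000090fac7893f");
 */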
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4a96c2049b7b..4195115c7e54 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -37,9 +37,7 @@ static void nvmet_inline_bio_init(struct nvmet_req *req)
{
struct bio *bio = &req->inline_bio;
- bio_init(bio);
- bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
- bio->bi_io_vec = req->inline_bvec;
+ bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
}
static void nvmet_execute_rw(struct nvmet_req *req)
@@ -58,7 +56,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
if (req->cmd->rw.opcode == nvme_cmd_write) {
op = REQ_OP_WRITE;
- op_flags = WRITE_ODIRECT;
+ op_flags = REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op_flags |= REQ_FUA;
} else {
@@ -96,7 +94,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
cookie = submit_bio(bio);
- blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+ blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
}
static void nvmet_execute_flush(struct nvmet_req *req)
@@ -109,7 +107,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
bio->bi_bdev = req->ns->bdev;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
@@ -172,6 +170,32 @@ static void nvmet_execute_dsm(struct nvmet_req *req)
}
}
+static void nvmet_execute_write_zeroes(struct nvmet_req *req)
+{
+ struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+ struct bio *bio = NULL;
+ u16 status = NVME_SC_SUCCESS;
+ sector_t sector;
+ sector_t nr_sector;
+
+ sector = le64_to_cpu(write_zeroes->slba) <<
+ (req->ns->blksize_shift - 9);
+ nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) <<
+ (req->ns->blksize_shift - 9)) + 1;
+
+ if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+ GFP_KERNEL, &bio, true))
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ submit_bio(bio);
+ } else {
+ nvmet_req_complete(req, status);
+ }
+}
+
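A minimal sketch of the LBA-to-sector shift used in nvmet_execute_write_zeroes() above, assuming blksize_shift is at least 9 (512-byte or larger logical blocks):

/* Sketch only: convert an NVMe LBA to a 512-byte sector index. */
static inline sector_t nvmet_lba_to_sect(u8 blksize_shift, __le64 slba)
{
	return (sector_t)le64_to_cpu(slba) << (blksize_shift - 9);
}
/* e.g. blksize_shift == 12 (4 KiB blocks): LBA 0x80 -> sector 0x80 << 3 == 0x400 */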
int nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -209,6 +233,9 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
sizeof(struct nvme_dsm_range);
return 0;
+ case nvme_cmd_write_zeroes:
+ req->execute = nvmet_execute_write_zeroes;
+ return 0;
default:
pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d686b2..9aaa70071ae5 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,6 +36,7 @@
(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
struct nvme_loop_iod {
+ struct nvme_request nvme_req;
struct nvme_command cmd;
struct nvme_completion rsp;
struct nvmet_req req;
@@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req)
blk_mq_end_request(req, error);
}
-static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+static void nvme_loop_queue_response(struct nvmet_req *req)
{
struct nvme_loop_iod *iod =
- container_of(nvme_req, struct nvme_loop_iod, req);
+ container_of(req, struct nvme_loop_iod, req);
struct nvme_completion *cqe = &iod->rsp;
/*
@@ -126,13 +127,13 @@ static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
*/
if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+ nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
} else {
- struct request *req = blk_mq_rq_from_pdu(iod);
+ struct request *rq = blk_mq_rq_from_pdu(iod);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
- memcpy(req->special, cqe, sizeof(*cqe));
- blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+ iod->nvme_req.result = cqe->result;
+ blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
}
}
@@ -168,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
int ret;
ret = nvme_setup_cmd(ns, req, &iod->cmd);
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -178,26 +179,25 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
nvme_cleanup_cmd(req);
blk_mq_start_request(req);
nvme_loop_queue_response(&iod->req);
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
if (blk_rq_bytes(req)) {
iod->sg_table.sgl = iod->first_sgl;
ret = sg_alloc_table_chained(&iod->sg_table,
- req->nr_phys_segments, iod->sg_table.sgl);
+ blk_rq_nr_phys_segments(req),
+ iod->sg_table.sgl);
if (ret)
return BLK_MQ_RQ_QUEUE_BUSY;
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
- BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
}
- iod->cmd.common.command_id = req->tag;
blk_mq_start_request(req);
schedule_work(&iod->work);
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 76b6eedccaf9..23d5eb1c944f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -47,6 +47,7 @@ struct nvmet_ns {
loff_t size;
u8 nguid[16];
+ bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
- return !list_empty_careful(&ns->dev_link);
-}
-
struct nvmet_cq {
u16 qid;
u16 size;
@@ -238,7 +234,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
- req->rsp->result = cpu_to_le32(result);
+ req->rsp->result.u32 = cpu_to_le32(result);
}
/*
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d17a19..3fbcdb7a583c 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1045,8 +1045,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
}
ret = nvmet_sq_init(&queue->nvme_sq);
- if (ret)
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
goto out_free_queue;
+ }
ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
if (ret)
@@ -1116,6 +1118,7 @@ out_destroy_sq:
out_free_queue:
kfree(queue);
out_reject:
+ pr_debug("rejecting connect request with status code %d\n", ret);
nvmet_rdma_cm_reject(cm_id, ret);
return NULL;
}
@@ -1129,7 +1132,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
rdma_notify(queue->cm_id, event->event);
break;
default:
- pr_err("received unrecognized IB QP event %d\n", event->event);
+ pr_err("received IB QP event: %s (%d)\n",
+ ib_event_msg(event->event), event->event);
break;
}
}
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 38d938d7fe67..1520596f54a6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -173,6 +173,7 @@ hv_storvsc-y := storvsc_drv.o
sd_mod-objs := sd.o
sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bb20684e9fa..db03c49e2350 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -154,7 +154,8 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
buff, bufflen, sshdr,
ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL, req_flags);
+ ALUA_FAILOVER_RETRIES, NULL,
+ req_flags, 0);
}
/*
@@ -187,7 +188,8 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
stpg_data, stpg_len,
sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL, req_flags);
+ ALUA_FAILOVER_RETRIES, NULL,
+ req_flags, 0);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
@@ -1066,7 +1068,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
state != SCSI_ACCESS_STATE_ACTIVE &&
state != SCSI_ACCESS_STATE_LBA) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 375d81850f15..5b80746980b8 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -452,7 +452,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->lun_state != CLARIION_LUN_OWNED) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9406d5f4a3d3..308e87195dc1 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -266,7 +266,7 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->path_state != HP_SW_PATH_ACTIVE) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 06fbd0b0c68a..00d9c326158e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -724,7 +724,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->state != RDAC_STATE_ACTIVE) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 2f2a9910e30e..ef99f62831fb 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1595,7 +1595,7 @@ static int _init_blk_request(struct osd_request *or,
}
or->request = req;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
req->timeout = or->timeout;
req->retries = or->retries;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 5033223f6287..a2960f5d98ec 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
SRpnt->bio = NULL;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1deb6adc411f..75455d4dab68 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
wmb();
}
+ if (sdev->request_queue)
+ blk_set_queue_depth(sdev->request_queue, depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106a6adbd6f1..996e134d79fa 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1988,7 +1988,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->cmd_len = COMMAND_SIZE(req->cmd[0]);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
req->retries = 5;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2cca9cffc63f..9a8ccff1121f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -86,10 +86,8 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct request_queue *q = cmd->request->q;
- blk_mq_requeue_request(cmd->request);
- blk_mq_kick_requeue_list(q);
+ blk_mq_requeue_request(cmd->request, true);
put_device(&sdev->sdev_gendev);
}
@@ -163,26 +161,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
__scsi_queue_insert(cmd, reason, 1);
}
-/**
- * scsi_execute - insert request and wait for the result
- * @sdev: scsi device
- * @cmd: scsi command
- * @data_direction: data direction
- * @buffer: data buffer
- * @bufflen: len of buffer
- * @sense: optional sense buffer
- * @timeout: request timeout in seconds
- * @retries: number of times to retry request
- * @flags: or into request flags;
- * @resid: optional residual length
- *
- * returns the req->errors value which is the scsi_cmnd result
- * field.
- */
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+
+static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, u64 flags,
- int *resid)
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
int write = (data_direction == DMA_TO_DEVICE);
@@ -203,7 +186,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req->sense_len = 0;
req->retries = retries;
req->timeout = timeout;
- req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
+ req->cmd_flags |= flags;
+ req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
/*
* head injection *required* here otherwise quiesce won't work
@@ -227,12 +211,37 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
return ret;
}
+
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @data_direction: data direction
+ * @buffer: data buffer
+ * @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags: flags to OR into the request's cmd_flags
+ * @resid: optional residual length
+ *
+ * returns the req->errors value which is the scsi_cmnd result
+ * field.
+ */
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, int timeout, int retries, u64 flags,
+ int *resid)
+{
+ return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
+ timeout, retries, flags, 0, resid);
+}
EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
- int *resid, u64 flags)
+ int *resid, u64 flags, req_flags_t rq_flags)
{
char *sense = NULL;
int result;
@@ -242,8 +251,8 @@ int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
if (!sense)
return DRIVER_ERROR << 24;
}
- result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
- sense, timeout, retries, flags, resid);
+ result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
+ sense, timeout, retries, flags, rq_flags, resid);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
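A minimal usage sketch of the new rq_flags argument; this mirrors the sd_sync_cache() hunk later in this patch, which passes RQF_PM through the dedicated parameter rather than OR-ing it into cmd_flags:

	/* sketch: PM-initiated request; 0 for cmd_flags, RQF_PM for rq_flags */
	res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
				     &sshdr, timeout, SD_MAX_RETRIES,
				     NULL, 0, RQF_PM);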
@@ -813,7 +822,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
;
- else if (!(req->cmd_flags & REQ_QUIET))
+ else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
/* BLOCK_PC may have set error */
@@ -943,7 +952,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
- if (!(req->cmd_flags & REQ_QUIET)) {
+ if (!(req->rq_flags & RQF_QUIET)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -972,7 +981,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* A new command will be prepared and issued.
*/
if (q->mq_ops) {
- cmd->request->cmd_flags &= ~REQ_DONTPREP;
+ cmd->request->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
scsi_mq_requeue_cmd(cmd);
} else {
@@ -998,8 +1007,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
- sdb->table.sgl)))
+ if (unlikely(sg_alloc_table_chained(&sdb->table,
+ blk_rq_nr_phys_segments(req), sdb->table.sgl)))
return BLKPREP_DEFER;
/*
@@ -1031,7 +1040,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
bool is_mq = (rq->mq_ctx != NULL);
int error;
- BUG_ON(!rq->nr_phys_segments);
+ BUG_ON(!blk_rq_nr_phys_segments(rq));
error = scsi_init_sgtable(rq, &cmd->sdb);
if (error)
@@ -1234,7 +1243,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
/*
* If the devices is blocked we defer normal commands.
*/
- if (!(req->cmd_flags & REQ_PREEMPT))
+ if (!(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
@@ -1243,7 +1252,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->cmd_flags & REQ_PREEMPT))
+ if (!(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
@@ -1279,7 +1288,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default:
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
}
return ret;
@@ -1736,7 +1745,7 @@ static void scsi_request_fn(struct request_queue *q)
* we add the dev to the starved list so it eventually gets
* a run when a tag is freed.
*/
- if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
+ if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
spin_lock_irq(shost->host_lock);
if (list_empty(&sdev->starved_entry))
list_add_tail(&sdev->starved_entry,
@@ -1801,7 +1810,7 @@ static inline int prep_to_mq(int ret)
{
switch (ret) {
case BLKPREP_OK:
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
case BLKPREP_DEFER:
return BLK_MQ_RQ_QUEUE_BUSY;
default:
@@ -1888,7 +1897,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
int reason;
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out;
ret = BLK_MQ_RQ_QUEUE_BUSY;
@@ -1903,11 +1912,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_target_busy;
- if (!(req->cmd_flags & REQ_DONTPREP)) {
+ if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_dec_host_busy;
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
} else {
blk_mq_start_request(req);
}
@@ -1941,7 +1950,6 @@ out_put_device:
out:
switch (ret) {
case BLK_MQ_RQ_QUEUE_BUSY:
- blk_mq_stop_hw_queue(hctx);
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
@@ -1952,7 +1960,7 @@ out:
* we hit an error, as we will never see this command
* again.
*/
- if (req->cmd_flags & REQ_DONTPREP)
+ if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
default:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51e56296f465..079c2d9759fb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -93,6 +93,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
@@ -163,7 +164,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
static const char temp[] = "temporary ";
int len;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
/* no cache control on RBC devices; theoretically they
* can do it, but there's probably so many exceptions
* it's not worth the risk */
@@ -262,7 +263,7 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
sdp->allow_restart = simple_strtoul(buf, NULL, 10);
@@ -392,6 +393,11 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ if (sd_is_zoned(sdkp)) {
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ return count;
+ }
+
if (sdp->type != TYPE_DISK)
return -EINVAL;
@@ -459,7 +465,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
err = kstrtoul(buf, 10, &max);
@@ -710,7 +716,6 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
unsigned int len;
int ret;
char *buf;
@@ -766,24 +771,19 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
goto out;
}
- rq->completion_data = page;
rq->timeout = SD_TIMEOUT;
cmd->transfersize = len;
cmd->allowed = SD_MAX_RETRIES;
- /*
- * Initially __data_len is set to the amount of data that needs to be
- * transferred to the target. This amount depends on whether WRITE SAME
- * or UNMAP is being used. After the scatterlist has been mapped by
- * scsi_init_io() we set __data_len to the size of the area to be
- * discarded on disk. This allows us to report completion on the full
- * amount of blocks described by the request.
- */
- blk_add_request_payload(rq, page, 0, len);
- ret = scsi_init_io(cmd);
- rq->__data_len = nr_bytes;
+ rq->special_vec.bv_page = page;
+ rq->special_vec.bv_offset = 0;
+ rq->special_vec.bv_len = len;
+
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ rq->resid_len = len;
+ ret = scsi_init_io(cmd);
out:
if (ret != BLKPREP_OK)
__free_page(page);
@@ -844,6 +844,12 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
+ if (sd_is_zoned(sdkp)) {
+ ret = sd_zbc_setup_write_cmnd(cmd);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -901,19 +907,25 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
struct request *rq = SCpnt->request;
struct scsi_device *sdp = SCpnt->device;
struct gendisk *disk = rq->rq_disk;
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = scsi_disk(disk);
sector_t block = blk_rq_pos(rq);
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
unsigned int dif, dix;
+ bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
int ret;
unsigned char protect;
+ if (zoned_write) {
+ ret = sd_zbc_setup_write_cmnd(SCpnt);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
ret = scsi_init_io(SCpnt);
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
- sdkp = scsi_disk(disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
@@ -1013,8 +1025,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
- req_op(rq), (unsigned long long) rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
goto out;
}
@@ -1132,6 +1143,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
*/
ret = BLKPREP_OK;
out:
+ if (zoned_write && ret != BLKPREP_OK)
+ sd_zbc_cancel_write_cmnd(SCpnt);
+
return ret;
}
@@ -1149,6 +1163,10 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
+ case REQ_OP_ZONE_REPORT:
+ return sd_zbc_setup_report_cmnd(cmd);
+ case REQ_OP_ZONE_RESET:
+ return sd_zbc_setup_reset_cmnd(cmd);
default:
BUG();
}
@@ -1158,8 +1176,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
- if (req_op(rq) == REQ_OP_DISCARD)
- __free_page(rq->completion_data);
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ __free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
@@ -1495,7 +1513,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
*/
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
&sshdr, timeout, SD_MAX_RETRIES,
- NULL, REQ_PM);
+ NULL, 0, RQF_PM);
if (res == 0)
break;
}
@@ -1780,7 +1798,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned char op = SCpnt->cmnd[0];
unsigned char unmap = SCpnt->cmnd[1] & 8;
- if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_ZONE_RESET:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
@@ -1788,6 +1809,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = 0;
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
+ break;
+ case REQ_OP_ZONE_REPORT:
+ if (!result) {
+ good_bytes = scsi_bufflen(SCpnt)
+ - scsi_get_resid(SCpnt);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
+ scsi_set_resid(SCpnt, blk_rq_bytes(req));
+ }
+ break;
}
if (result) {
@@ -1840,7 +1872,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = 0;
req->__data_len = blk_rq_bytes(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
}
}
@@ -1848,7 +1880,11 @@ static int sd_done(struct scsi_cmnd *SCpnt)
default:
break;
}
+
out:
+ if (sd_is_zoned(sdkp))
+ sd_zbc_complete(SCpnt, good_bytes, &sshdr);
+
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
"sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt)));
@@ -1983,7 +2019,6 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
}
-
/*
* Determine whether disk supports Data Integrity Field.
*/
@@ -2133,6 +2168,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
/* Logical blocks per physical block exponent */
sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
+ /* RC basis */
+ sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
+
/* Lowest aligned logical block */
alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
blk_queue_alignment_offset(sdp->request_queue, alignment);
@@ -2242,7 +2280,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
{
int sector_size;
struct scsi_device *sdp = sdkp->device;
- sector_t old_capacity = sdkp->capacity;
if (sd_try_rc16_first(sdp)) {
sector_size = read_capacity_16(sdkp, sdp, buffer);
@@ -2323,35 +2360,44 @@ got_data:
sector_size = 512;
}
blk_queue_logical_block_size(sdp->request_queue, sector_size);
+ blk_queue_physical_block_size(sdp->request_queue,
+ sdkp->physical_block_size);
+ sdkp->device->sector_size = sector_size;
- {
- char cap_str_2[10], cap_str_10[10];
+ if (sdkp->capacity > 0xffffffff)
+ sdp->use_16_for_rw = 1;
- string_get_size(sdkp->capacity, sector_size,
- STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(sdkp->capacity, sector_size,
- STRING_UNITS_10, cap_str_10,
- sizeof(cap_str_10));
+}
- if (sdkp->first_scan || old_capacity != sdkp->capacity) {
- sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte logical blocks: (%s/%s)\n",
- (unsigned long long)sdkp->capacity,
- sector_size, cap_str_10, cap_str_2);
+/*
+ * Print disk capacity
+ */
+static void
+sd_print_capacity(struct scsi_disk *sdkp,
+ sector_t old_capacity)
+{
+ int sector_size = sdkp->device->sector_size;
+ char cap_str_2[10], cap_str_10[10];
- if (sdkp->physical_block_size != sector_size)
- sd_printk(KERN_NOTICE, sdkp,
- "%u-byte physical blocks\n",
- sdkp->physical_block_size);
- }
- }
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_10, cap_str_10,
+ sizeof(cap_str_10));
- if (sdkp->capacity > 0xffffffff)
- sdp->use_16_for_rw = 1;
+ if (sdkp->first_scan || old_capacity != sdkp->capacity) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%llu %d-byte logical blocks: (%s/%s)\n",
+ (unsigned long long)sdkp->capacity,
+ sector_size, cap_str_10, cap_str_2);
- blk_queue_physical_block_size(sdp->request_queue,
- sdkp->physical_block_size);
- sdkp->device->sector_size = sector_size;
+ if (sdkp->physical_block_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->physical_block_size);
+
+ sd_zbc_print_zones(sdkp);
+ }
}
/* called with buffer of length 512 */
@@ -2613,7 +2659,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return;
if (sdkp->protection_type == 0)
@@ -2720,6 +2766,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
*/
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
unsigned char *buffer;
u16 rot;
const int vpd_len = 64;
@@ -2734,10 +2781,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
rot = get_unaligned_be16(&buffer[4]);
if (rot == 1) {
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
+ sdkp->zoned = (buffer[8] >> 4) & 3;
+ if (sdkp->zoned == 1)
+ q->limits.zoned = BLK_ZONED_HA;
+ else if (sdkp->device->type == TYPE_ZBC)
+ q->limits.zoned = BLK_ZONED_HM;
+ else
+ q->limits.zoned = BLK_ZONED_NONE;
+ if (blk_queue_is_zoned(q) && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
+ q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
+
out:
kfree(buffer);
}
@@ -2809,6 +2867,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct request_queue *q = sdkp->disk->queue;
+ sector_t old_capacity = sdkp->capacity;
unsigned char *buffer;
unsigned int dev_max, rw_max;
@@ -2842,8 +2901,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
+ sd_zbc_read_zones(sdkp, buffer);
}
+ sd_print_capacity(sdkp, old_capacity);
+
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
@@ -3041,9 +3103,16 @@ static int sd_probe(struct device *dev)
scsi_autopm_get_device(sdp);
error = -ENODEV;
- if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
+ if (sdp->type != TYPE_DISK &&
+ sdp->type != TYPE_ZBC &&
+ sdp->type != TYPE_MOD &&
+ sdp->type != TYPE_RBC)
goto out;
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (sdp->type == TYPE_ZBC)
+ goto out;
+#endif
SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
"sd_probe\n"));
@@ -3147,6 +3216,8 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
+ sd_zbc_remove(sdkp);
+
blk_register_region(devt, SD_MINORS, NULL,
sd_default_probe, NULL, NULL);
@@ -3200,7 +3271,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) & DRIVER_SENSE)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d986368da9..4dac35e96a75 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -64,6 +64,15 @@ struct scsi_disk {
struct scsi_device *device;
struct device dev;
struct gendisk *disk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_zones;
+ unsigned int zone_blocks;
+ unsigned int zone_shift;
+ unsigned long *zones_wlock;
+ unsigned int zones_optimal_open;
+ unsigned int zones_optimal_nonseq;
+ unsigned int zones_max_open;
+#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
@@ -94,6 +103,9 @@ struct scsi_disk {
unsigned lbpvpd : 1;
unsigned ws10 : 1;
unsigned ws16 : 1;
+ unsigned rc_basis : 2;
+ unsigned zoned : 2;
+ unsigned urswrz : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
@@ -156,6 +168,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
return blocks * sdev->sector_size;
}
+static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector)
+{
+ return sector >> (ilog2(sdev->sector_size) - 9);
+}
+
/*
* Look up the DIX operation based on whether the command is read or
* write and whether dix and dif are enabled.
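For readers less used to these shift-based helpers, here is a tiny standalone sketch (illustrative only, not part of the patch) of what sectors_to_logical() and logical_to_sectors() compute for a 4096-byte logical block size, where ilog2(4096) - 9 == 3; the *_4k names are made up for the example.

#include <assert.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

/* Mirrors the helpers above for sector_size == 4096 (shift by 3). */
static sector_t sectors_to_logical_4k(sector_t sector) { return sector >> 3; }
static sector_t logical_to_sectors_4k(sector_t block)  { return block << 3; }

int main(void)
{
	assert(sectors_to_logical_4k(80) == 10);	/* 512 B sector 80 -> LBA 10 */
	assert(logical_to_sectors_4k(10) == 80);	/* and back */
	return 0;
}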
@@ -239,4 +256,57 @@ static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int sd_is_zoned(struct scsi_disk *sdkp)
+{
+ return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC;
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+
+extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+extern void sd_zbc_remove(struct scsi_disk *sdkp);
+extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ return 0;
+}
+
+static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
+
+static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+
+static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+ /* Let the drive fail requests */
+ return BLKPREP_OK;
+}
+
+static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {}
+
+static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+ return BLKPREP_INVALID;
+}
+
+static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+ return BLKPREP_INVALID;
+}
+
+static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr) {}
+
+#endif /* CONFIG_BLK_DEV_ZONED */
+
#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
new file mode 100644
index 000000000000..92620c8ea8ad
--- /dev/null
+++ b/drivers/scsi/sd_zbc.c
@@ -0,0 +1,648 @@
+/*
+ * SCSI Zoned Block commands
+ *
+ * Copyright (C) 2014-2015 SUSE Linux GmbH
+ * Written by: Hannes Reinecke <hare@suse.de>
+ * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
+ * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+
+#include "sd.h"
+#include "scsi_priv.h"
+
+enum zbc_zone_type {
+ ZBC_ZONE_TYPE_CONV = 0x1,
+ ZBC_ZONE_TYPE_SEQWRITE_REQ,
+ ZBC_ZONE_TYPE_SEQWRITE_PREF,
+ ZBC_ZONE_TYPE_RESERVED,
+};
+
+enum zbc_zone_cond {
+ ZBC_ZONE_COND_NO_WP,
+ ZBC_ZONE_COND_EMPTY,
+ ZBC_ZONE_COND_IMP_OPEN,
+ ZBC_ZONE_COND_EXP_OPEN,
+ ZBC_ZONE_COND_CLOSED,
+ ZBC_ZONE_COND_READONLY = 0xd,
+ ZBC_ZONE_COND_FULL,
+ ZBC_ZONE_COND_OFFLINE,
+};
+
+/**
+ * Convert a zone descriptor to a zone struct.
+ */
+static void sd_zbc_parse_report(struct scsi_disk *sdkp,
+ u8 *buf,
+ struct blk_zone *zone)
+{
+ struct scsi_device *sdp = sdkp->device;
+
+ memset(zone, 0, sizeof(struct blk_zone));
+
+ zone->type = buf[0] & 0x0f;
+ zone->cond = (buf[1] >> 4) & 0xf;
+ if (buf[1] & 0x01)
+ zone->reset = 1;
+ if (buf[1] & 0x02)
+ zone->non_seq = 1;
+
+ zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+ zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+ if (zone->type != ZBC_ZONE_TYPE_CONV &&
+ zone->cond == ZBC_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+}
+
+/**
+ * Issue a REPORT ZONES SCSI command.
+ */
+static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen, sector_t lba)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout;
+ struct scsi_sense_hdr sshdr;
+ unsigned char cmd[16];
+ unsigned int rep_len;
+ int result;
+
+ memset(cmd, 0, 16);
+ cmd[0] = ZBC_IN;
+ cmd[1] = ZI_REPORT_ZONES;
+ put_unaligned_be64(lba, &cmd[2]);
+ put_unaligned_be32(buflen, &cmd[10]);
+ memset(buf, 0, buflen);
+
+ result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+ buf, buflen, &sshdr,
+ timeout, SD_MAX_RETRIES, NULL);
+ if (result) {
+ sd_printk(KERN_ERR, sdkp,
+ "REPORT ZONES lba %llu failed with %d/%d\n",
+ (unsigned long long)lba,
+ host_byte(result), driver_byte(result));
+ return -EIO;
+ }
+
+ rep_len = get_unaligned_be32(&buf[0]);
+ if (rep_len < 64) {
+ sd_printk(KERN_ERR, sdkp,
+ "REPORT ZONES report invalid length %u\n",
+ rep_len);
+ return -EIO;
+ }
+
+ return 0;
+}
+
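As a reference for the CDB built above (and again in sd_zbc_setup_report_cmnd() below), here is a hedged userspace sketch of the 16-byte ZBC REPORT ZONES command layout; build_report_zones_cdb() and the put_be*() helpers are illustrative, not kernel APIs, and the PARTIAL bit value follows the ZBC reporting-options byte as used by the prep path.

#include <stdint.h>
#include <string.h>

static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);	/* most significant byte first */
}

static void put_be32(uint32_t v, uint8_t *p)
{
	for (int i = 0; i < 4; i++)
		p[i] = v >> (24 - 8 * i);
}

/* Illustrative only: byte offsets match the code above. */
static void build_report_zones_cdb(uint8_t cdb[16], uint64_t start_lba,
				   uint32_t alloc_len, int partial)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x95;			/* ZBC_IN opcode */
	cdb[1] = 0x00;			/* ZI_REPORT_ZONES service action */
	put_be64(start_lba, &cdb[2]);	/* zone start LBA, bytes 2..9 */
	put_be32(alloc_len, &cdb[10]);	/* allocation length, bytes 10..13 */
	if (partial)
		cdb[14] |= 0x80;	/* PARTIAL bit (bit 7 of byte 14) */
}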
+int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t lba, sector = blk_rq_pos(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ int ret;
+
+ WARN_ON(nr_bytes == 0);
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLKPREP_KILL;
+
+ ret = scsi_init_io(cmd);
+ if (ret != BLKPREP_OK)
+ return ret;
+
+ cmd->cmd_len = 16;
+ memset(cmd->cmnd, 0, cmd->cmd_len);
+ cmd->cmnd[0] = ZBC_IN;
+ cmd->cmnd[1] = ZI_REPORT_ZONES;
+ lba = sectors_to_logical(sdkp->device, sector);
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
+ /* Do a partial report to speed things up */
+ cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
+
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+ cmd->sdb.length = nr_bytes;
+ cmd->transfersize = sdkp->device->sector_size;
+ cmd->allowed = 0;
+
+ /*
+ * Report may return fewer bytes than requested. Make sure
+ * to report completion on the entire initial request.
+ */
+ rq->__data_len = nr_bytes;
+
+ return BLKPREP_OK;
+}
+
+static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
+ unsigned int good_bytes)
+{
+ struct request *rq = scmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct sg_mapping_iter miter;
+ struct blk_zone_report_hdr hdr;
+ struct blk_zone zone;
+ unsigned int offset, bytes = 0;
+ unsigned long flags;
+ u8 *buf;
+
+ if (good_bytes < 64)
+ return;
+
+ memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
+
+ sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+ local_irq_save(flags);
+ while (sg_miter_next(&miter) && bytes < good_bytes) {
+
+ buf = miter.addr;
+ offset = 0;
+
+ if (bytes == 0) {
+ /* Set the report header */
+ hdr.nr_zones = min_t(unsigned int,
+ (good_bytes - 64) / 64,
+ get_unaligned_be32(&buf[0]) / 64);
+ memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
+ offset += 64;
+ bytes += 64;
+ }
+
+ /* Parse zone descriptors */
+ while (offset < miter.length && hdr.nr_zones) {
+ WARN_ON(offset > miter.length);
+ buf = miter.addr + offset;
+ sd_zbc_parse_report(sdkp, buf, &zone);
+ memcpy(buf, &zone, sizeof(struct blk_zone));
+ offset += 64;
+ bytes += 64;
+ hdr.nr_zones--;
+ }
+
+ if (!hdr.nr_zones)
+ break;
+
+ }
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+}
+
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+ return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
+static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
+ sector_t sector)
+{
+ return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
+}
+
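A quick worked example of the zone_shift arithmetic these helpers rely on, assuming (for illustration only) 256 MiB zones and a 512-byte logical block size, so zone_blocks = 524288 = 2^19 and zone_shift = 19:

#include <stdio.h>

int main(void)
{
	unsigned long long zone_blocks = (256ULL << 20) / 512;	/* 524288 == 1 << 19 */
	unsigned int zone_shift = 19;				/* ilog2(zone_blocks) */
	unsigned long long sector = 1000000;			/* 512 B units */

	/* sector_size == 512 here, so sectors_to_logical() is the identity. */
	printf("zone_blocks=%llu zone=%llu\n",
	       zone_blocks, sector >> zone_shift);		/* prints zone 1 */
	return 0;
}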
+int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t block = sectors_to_logical(sdkp->device, sector);
+ unsigned int zno = block >> sdkp->zone_shift;
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLKPREP_KILL;
+
+ if (sdkp->device->changed)
+ return BLKPREP_KILL;
+
+ if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
+ /* Unaligned request */
+ return BLKPREP_KILL;
+
+ /* Do not allow concurrent reset and writes */
+ if (sdkp->zones_wlock &&
+ test_and_set_bit(zno, sdkp->zones_wlock))
+ return BLKPREP_DEFER;
+
+ cmd->cmd_len = 16;
+ memset(cmd->cmnd, 0, cmd->cmd_len);
+ cmd->cmnd[0] = ZBC_OUT;
+ cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+ put_unaligned_be64(block, &cmd->cmnd[2]);
+
+ rq->timeout = SD_TIMEOUT;
+ cmd->sc_data_direction = DMA_NONE;
+ cmd->transfersize = 0;
+ cmd->allowed = 0;
+
+ return BLKPREP_OK;
+}
+
+int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
+ unsigned int zno = sd_zbc_zone_no(sdkp, sector);
+
+ /*
+ * Note: The alignment of the write command on logical
+ * blocks is checked in sd.c
+ */
+
+ /* Do not allow zone boundaries crossing on host-managed drives */
+ if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
+ (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
+ return BLKPREP_KILL;
+
+ /*
+ * Do not issue more than one write at a time per
+ * zone. This solves write ordering problems due to
+ * the unlocking of the request queue in the dispatch
+ * path in the non-scsi-mq case. For scsi-mq, this
+ * also avoids potential write reordering when multiple
+ * threads running on different CPUs write to the same
+ * zone (with a synchronized sequential pattern).
+ */
+ if (sdkp->zones_wlock &&
+ test_and_set_bit(zno, sdkp->zones_wlock))
+ return BLKPREP_DEFER;
+
+ return BLKPREP_OK;
+}
+
+static void sd_zbc_unlock_zone(struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+
+ if (sdkp->zones_wlock) {
+ unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
+ WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+ clear_bit_unlock(zno, sdkp->zones_wlock);
+ smp_mb__after_atomic();
+ }
+}
+
+void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd)
+{
+ sd_zbc_unlock_zone(cmd->request);
+}
+
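The write-lock protocol above (try the zone bit at prep time, defer on contention, release on completion or cancellation) can be summarized by this small userspace analogue; C11 atomics stand in for the kernel's test_and_set_bit()/clear_bit_unlock(), and all names here are illustrative.

#include <stdatomic.h>

enum { ZPREP_OK, ZPREP_DEFER };				/* illustrative return codes */

#define NR_ZONES 256
static _Atomic unsigned long long zwlock[NR_ZONES / 64];	/* one bit per zone */

/* Analogue of the test_and_set_bit() in sd_zbc_setup_write_cmnd(). */
static int zone_write_trylock(unsigned int zno)
{
	unsigned long long mask = 1ULL << (zno % 64);
	unsigned long long old = atomic_fetch_or(&zwlock[zno / 64], mask);

	return (old & mask) ? ZPREP_DEFER : ZPREP_OK;
}

/* Analogue of the clear_bit_unlock() in sd_zbc_unlock_zone(). */
static void zone_write_unlock(unsigned int zno)
{
	atomic_fetch_and(&zwlock[zno / 64], ~(1ULL << (zno % 64)));
}

int main(void)
{
	if (zone_write_trylock(5) == ZPREP_OK &&
	    zone_write_trylock(5) == ZPREP_DEFER)	/* second writer must wait */
		zone_write_unlock(5);
	return 0;
}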
+void sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr)
+{
+ int result = cmd->result;
+ struct request *rq = cmd->request;
+
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_ZONE_RESET:
+
+ /* Unlock the zone */
+ sd_zbc_unlock_zone(rq);
+
+ if (!result ||
+ sshdr->sense_key != ILLEGAL_REQUEST)
+ break;
+
+ switch (sshdr->asc) {
+ case 0x24:
+ /*
+ * INVALID FIELD IN CDB error: For a zone reset,
+ * this means that a reset of a conventional
+ * zone was attempted. Nothing to worry about in
+ * this case, so be quiet about the error.
+ */
+ if (req_op(rq) == REQ_OP_ZONE_RESET)
+ rq->rq_flags |= RQF_QUIET;
+ break;
+ case 0x21:
+ /*
+ * INVALID ADDRESS FOR WRITE error: retrying write requests
+ * that failed with any kind of alignment error is unlikely
+ * to succeed. So don't.
+ */
+ cmd->allowed = 0;
+ break;
+ }
+
+ break;
+
+ case REQ_OP_ZONE_REPORT:
+
+ if (!result)
+ sd_zbc_report_zones_complete(cmd, good_bytes);
+ break;
+
+ }
+}
+
+/**
+ * Read zoned block device characteristics (VPD page B6).
+ */
+static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+
+ if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "Unconstrained-read check failed\n");
+ return -ENODEV;
+ }
+
+ if (sdkp->device->type != TYPE_ZBC) {
+ /* Host-aware */
+ sdkp->urswrz = 1;
+ sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
+ sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
+ sdkp->zones_max_open = 0;
+ } else {
+ /* Host-managed */
+ sdkp->urswrz = buf[4] & 1;
+ sdkp->zones_optimal_open = 0;
+ sdkp->zones_optimal_nonseq = 0;
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ }
+
+ return 0;
+}
+
+/**
+ * Check reported capacity.
+ */
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ sector_t lba;
+ int ret;
+
+ if (sdkp->rc_basis != 0)
+ return 0;
+
+ /* Do a report zone to get the maximum LBA to check capacity */
+ ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
+ if (ret)
+ return ret;
+
+ /* The max_lba field is the last addressable LBA: capacity is max_lba + 1 */
+ lba = get_unaligned_be64(&buf[8]);
+ if (lba + 1 == sdkp->capacity)
+ return 0;
+
+ if (sdkp->first_scan)
+ sd_printk(KERN_WARNING, sdkp,
+ "Changing capacity from %llu to max LBA+1 %llu\n",
+ (unsigned long long)sdkp->capacity,
+ (unsigned long long)lba + 1);
+ sdkp->capacity = lba + 1;
+
+ return 0;
+}
+
+#define SD_ZBC_BUF_SIZE 131072
+
+static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+{
+ u64 zone_blocks;
+ sector_t block = 0;
+ unsigned char *buf;
+ unsigned char *rec;
+ unsigned int buf_len;
+ unsigned int list_length;
+ int ret;
+ u8 same;
+
+ sdkp->zone_blocks = 0;
+
+ /* Get a buffer */
+ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do a report zone to get the same field */
+ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+ if (ret) {
+ zone_blocks = 0;
+ goto out;
+ }
+
+ same = buf[4] & 0x0f;
+ if (same > 0) {
+ rec = &buf[64];
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ goto out;
+ }
+
+ /*
+ * Check the size of all zones: all zones must be of
+ * equal size, except the last zone, which can be smaller
+ * than the other zones.
+ */
+ do {
+
+ /* Parse REPORT ZONES header */
+ list_length = get_unaligned_be32(&buf[0]) + 64;
+ rec = buf + 64;
+ if (list_length < SD_ZBC_BUF_SIZE)
+ buf_len = list_length;
+ else
+ buf_len = SD_ZBC_BUF_SIZE;
+
+ /* Parse zone descriptors */
+ while (rec < buf + buf_len) {
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (sdkp->zone_blocks == 0) {
+ sdkp->zone_blocks = zone_blocks;
+ } else if (zone_blocks != sdkp->zone_blocks &&
+ (block + zone_blocks < sdkp->capacity
+ || zone_blocks > sdkp->zone_blocks)) {
+ zone_blocks = 0;
+ goto out;
+ }
+ block += zone_blocks;
+ rec += 64;
+ }
+
+ if (block < sdkp->capacity) {
+ ret = sd_zbc_report_zones(sdkp, buf,
+ SD_ZBC_BUF_SIZE, block);
+ if (ret) {
+ /* Do not leak the report buffer */
+ kfree(buf);
+ return ret;
+ }
+ }
+
+ } while (block < sdkp->capacity);
+
+ zone_blocks = sdkp->zone_blocks;
+
+out:
+ kfree(buf);
+
+ if (!zone_blocks) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non constant zone "
+ "size are not supported\n");
+ return -ENODEV;
+ }
+
+ if (!is_power_of_2(zone_blocks)) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non power of 2 zone "
+ "size are not supported\n");
+ return -ENODEV;
+ }
+
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Zone size too large\n");
+ return -ENODEV;
+ }
+
+ sdkp->zone_blocks = zone_blocks;
+
+ return 0;
+}
+
+static int sd_zbc_setup(struct scsi_disk *sdkp)
+{
+
+ /* chunk_sectors indicates the zone size */
+ blk_queue_chunk_sectors(sdkp->disk->queue,
+ logical_to_sectors(sdkp->device, sdkp->zone_blocks));
+ sdkp->zone_shift = ilog2(sdkp->zone_blocks);
+ sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
+ if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ sdkp->nr_zones++;
+
+ if (!sdkp->zones_wlock) {
+ sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!sdkp->zones_wlock)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ sector_t capacity;
+ int ret = 0;
+
+ if (!sd_is_zoned(sdkp))
+ /*
+ * Device managed or normal SCSI disk,
+ * no special handling required
+ */
+ return 0;
+
+ /* Get zoned block device characteristics */
+ ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
+ if (ret)
+ goto err;
+
+ /*
+ * Check for unconstrained reads: host-managed devices with
+ * constrained reads (drives failing read after write pointer)
+ * are not supported.
+ */
+ if (!sdkp->urswrz) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "constrained reads devices are not supported\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Check capacity */
+ ret = sd_zbc_check_capacity(sdkp, buf);
+ if (ret)
+ goto err;
+ capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+
+ /*
+ * Check zone size: only devices with a constant zone size (except
+ * possibly a last runt zone) that is a power of 2 are supported.
+ */
+ ret = sd_zbc_check_zone_size(sdkp);
+ if (ret)
+ goto err;
+
+ /* The drive satisfies the kernel restrictions: set it up */
+ ret = sd_zbc_setup(sdkp);
+ if (ret)
+ goto err;
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ return 0;
+
+err:
+ sdkp->capacity = 0;
+
+ return ret;
+}
+
+void sd_zbc_remove(struct scsi_disk *sdkp)
+{
+ kfree(sdkp->zones_wlock);
+ sdkp->zones_wlock = NULL;
+}
+
+void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+ if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+ return;
+
+ if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks + 1 runt zone\n",
+ sdkp->nr_zones - 1,
+ sdkp->zone_blocks);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks\n",
+ sdkp->nr_zones,
+ sdkp->zone_blocks);
+}
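To see the new REQ_OP_ZONE_REPORT path end to end, here is a hedged userspace sketch using the BLKREPORTZONE ioctl from the zoned block device UAPI introduced alongside this series; the device path is a placeholder and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	const unsigned int nrz = 16;		/* ask for the first 16 zones */
	struct blk_zone_report *rep;
	int fd = open("/dev/sdX", O_RDONLY);	/* placeholder ZBC/ZAC disk */

	if (fd < 0)
		return 1;
	rep = calloc(1, sizeof(*rep) + nrz * sizeof(struct blk_zone));
	if (!rep) {
		close(fd);
		return 1;
	}
	rep->sector = 0;			/* report from the start of the disk */
	rep->nr_zones = nrz;
	if (ioctl(fd, BLKREPORTZONE, rep) == 0) {
		for (unsigned int i = 0; i < rep->nr_zones; i++)
			printf("zone %u: start %llu len %llu wp %llu cond %u\n",
			       i,
			       (unsigned long long)rep->zones[i].start,
			       (unsigned long long)rep->zones[i].len,
			       (unsigned long long)rep->zones[i].wp,
			       rep->zones[i].cond);
	}
	free(rep);
	close(fd);
	return 0;
}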
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 618422ea3a41..605887d5ee57 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -546,7 +546,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 05c745663c10..cf549871c1ee 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5590,7 +5590,7 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
SCSI_SENSE_BUFFERSIZE, NULL,
- msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+ msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5652,11 +5652,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
/*
* Current function would be generally called from the power management
- * callbacks hence set the REQ_PM flag so that it doesn't resume the
+ * callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+ START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index f8be0e2f7bf7..8ca1e9d0cfe2 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -34,6 +34,7 @@
#define __LNET_TYPES_H__
#include <linux/types.h>
+#include <linux/bvec.h>
/** \addtogroup lnet
* @{
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 50c0152ba022..76a6836cdf70 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -47,6 +47,7 @@
#include <linux/pagemap.h>
/* current_is_kswapd() */
#include <linux/swap.h>
+#include <linux/bvec.h>
#define DEBUG_SUBSYSTEM S_LLITE
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 372d744315f3..d316ed537d59 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force writethrough using WRITE_FUA if a volatile write cache
+ * Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
op = REQ_OP_WRITE;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
}
} else {
op = REQ_OP_READ;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d9358dea..04d7aa7390d0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -935,13 +935,9 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
bio, page, bytes, off);
- if (rc != bytes)
- goto fail;
-
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
- bio->bi_vcnt, nr_vecs);
-
- if (bio->bi_vcnt > nr_vecs) {
+ bio_segments(bio), nr_vecs);
+ if (rc != bytes) {
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);