From 2f1a904ec548e48e7daa7b54fe18664d5a7e0d44 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 27 Jul 2015 19:24:31 -0400
Subject: drm/radeon/combios: add some validation of lvds values

commit 0a90a0cff9f429f886f423967ae053150dce9259 upstream.

Fixes a broken hsync start value uncovered by:
abc0b1447d4974963548777a5ba4a4457c82c426
(drm: Perform basic sanity checks on probed modes)

The driver handled the bad hsync start elsewhere, but the above commit
prevented it from getting added.

bug: https://bugs.freedesktop.org/show_bug.cgi?id=91401

Signed-off-by: Alex Deucher
Signed-off-by: Greg Kroah-Hartman
---
 drivers/gpu/drm/radeon/radeon_combios.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 68ce36056019..8cac69819054 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1271,10 +1271,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder

 		if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
 		    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+			u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+			if (hss > lvds->native_mode.hdisplay)
+				hss = (10 - 1) * 8;
+
 			lvds->native_mode.htotal = lvds->native_mode.hdisplay +
 				(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
 			lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-				(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				hss;
 			lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
 				(RBIOS8(tmp + 23) * 8);
-- cgit v1.2.3

From ec8ea7c221488d7e6a05a34e2f9fafab6f39c976 Mon Sep 17 00:00:00 2001
From: Brian King
Date: Tue, 14 Jul 2015 11:41:29 -0500
Subject: ipr: Fix locking for unit attention handling

commit 36b8e180e1e929e00b351c3b72aab3147fc14116 upstream.

Make sure we have the host lock held when calling scsi_report_bus_reset.
Fixes a crash seen as the __devices list in the scsi host was changing
as we were iterating through it.

Reviewed-by: Wen Xiong
Reviewed-by: Gabriel Krisman Bertazi
Signed-off-by: Brian King
Reviewed-by: Martin K. Petersen
Signed-off-by: James Bottomley
Signed-off-by: Greg Kroah-Hartman
---
 drivers/scsi/ipr.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f7732f3b9804..847414b9b468 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6141,21 +6141,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-	unsigned long hrrq_flags;
+	unsigned long lock_flags;

 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(scsi_cmd);

-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
 	} else {
-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		spin_lock(&ipr_cmd->hrrq->_lock);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock(&ipr_cmd->hrrq->_lock);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	}
 }
-- cgit v1.2.3

From 615b0eb835d3d37a9339872c5c1f7ce299e7a40f Mon Sep 17 00:00:00 2001
From: Brian King
Date: Tue, 14 Jul 2015 11:41:31 -0500
Subject: ipr: Fix incorrect trace indexing

commit bb7c54339e6a10ecce5c4961adf5e75b3cf0af30 upstream.

When ipr's internal driver trace was changed to an atomic, a
signed/unsigned bug slipped in which results in us indexing backwards
in our memory buffer writing on memory that does not belong to us.
This patch fixes this by removing the modulo and instead just mask
off the low bits.

Tested-by: Wen Xiong
Reviewed-by: Wen Xiong
Reviewed-by: Gabriel Krisman Bertazi
Signed-off-by: Brian King
Reviewed-by: Martin K. Petersen
Signed-off-by: James Bottomley
Signed-off-by: Greg Kroah-Hartman
---
 drivers/scsi/ipr.c | 5 +++--
 drivers/scsi/ipr.h | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 847414b9b468..5b9d5efdd12d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -554,9 +554,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 {
 	struct ipr_trace_entry *trace_entry;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	unsigned int trace_index;

-	trace_entry = &ioa_cfg->trace[atomic_add_return
-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+	trace_entry = &ioa_cfg->trace[trace_index];
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c19911554036..e045676d8325 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1452,6 +1452,7 @@ struct ipr_ioa_cfg {
 #define IPR_NUM_TRACE_INDEX_BITS	8
 #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
 	char trace_start[8];
 #define IPR_TRACE_START_LABEL			"trace"
-- cgit v1.2.3

From b8a1310da4aaeace4accce0762af56625bfbd769 Mon Sep 17 00:00:00 2001
From: Brian King
Date: Tue, 14 Jul 2015 11:41:33 -0500
Subject: ipr: Fix invalid array indexing for HRRQ

commit 3f1c0581310d5d94bd72740231507e763a6252a4 upstream.

Fixes another signed / unsigned array indexing bug in the ipr driver.
Currently, when hrrq_index wraps, it becomes a negative number. We do
the modulo, but still have a negative number, so we end up indexing
backwards in the array. Given where the hrrq array is located in memory,
we probably won't actually reference memory we don't own, but
nonetheless ipr is still looking at data within struct ipr_ioa_cfg and
interpreting it as struct ipr_hrr_queue data, so bad things could
certainly happen.

Each ipr adapter has anywhere from 1 to 16 HRRQs. By default, we use 2
on new adapters. Let's take an example:

Assume ioa_cfg->hrrq_index=0x7fffffffe and ioa_cfg->hrrq_num=4:

The atomic_add_return will then return -1. We mod this with 3 and get
-2, add one and get -1 for an array index.

On adapters which support more than a single HRRQ, we dedicate HRRQ to
adapter initialization and error interrupts so that we can optimize the
other queues for fast path I/O. So all normal I/O uses HRRQ 1-15. So we
want to spread the I/O requests across those HRRQs.

With the default module parameter settings, this bug won't hit, only
when someone sets the ipr.number_of_msix parameter to a value larger
than 3 is when bad things start to happen.

Tested-by: Wen Xiong
Reviewed-by: Wen Xiong
Reviewed-by: Gabriel Krisman Bertazi
Signed-off-by: Brian King
Reviewed-by: Martin K. Petersen
Signed-off-by: James Bottomley
Signed-off-by: Greg Kroah-Hartman
---
 drivers/scsi/ipr.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5b9d5efdd12d..4a79a5f0d95e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1007,10 +1007,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,

 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+	unsigned int hrrq;
+
 	if (ioa_cfg->hrrq_num == 1)
-		return 0;
-	else
-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+		hrrq = 0;
+	else {
+		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+	}
+	return hrrq;
 }

 /**
-- cgit v1.2.3
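
Both ipr fixes above are instances of the same C pitfall: once a signed 32-bit
counter wraps, '%' (which truncates toward zero in C99) yields a negative
remainder and the array is indexed backwards, while masking an unsigned value
with a power-of-two size always stays in range. A minimal stand-alone sketch,
with illustrative values rather than the driver's:

/* build: cc -o modwrap modwrap.c && ./modwrap */
#include <stdio.h>

int main(void)
{
	/* A counter that has wrapped past INT_MAX looks negative when signed. */
	int signed_idx = -5;
	unsigned int unsigned_idx = (unsigned int)-5;

	/* C99 '%' truncates toward zero, so a negative dividend gives a negative
	 * remainder -- the "indexing backwards" case both patches describe. */
	printf("signed:   (%d %% 3) + 1 = %d\n", signed_idx, (signed_idx % 3) + 1);     /* -1 */
	printf("unsigned: (%u %% 3) + 1 = %u\n", unsigned_idx, (unsigned_idx % 3) + 1); /* 3  */

	/* For a power-of-two table size, masking the low bits (the trace fix)
	 * is wrap-safe regardless of how the underlying counter is stored. */
	printf("masked:   %u\n", unsigned_idx & (256u - 1));                            /* 0..255 */
	return 0;
}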
From c0f94181a75400886041b183287ba91f1f392492 Mon Sep 17 00:00:00 2001
From: Mathias Nyman
Date: Mon, 3 Aug 2015 16:07:48 +0300
Subject: xhci: fix off by one error in TRB DMA address boundary check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 7895086afde2a05fa24a0e410d8e6b75ca7c8fdd upstream.

We need to check that a TRB is part of the current segment before
calculating its DMA address.

Previously a ring segment didn't use a full memory page, and every new
ring segment got a new memory page, so the off by one error in checking
the upper bound was never seen.

Now that we use a full memory page, 256 TRBs (4096 bytes), the off by
one didn't catch the case when a TRB was the first element of the next
segment.

This is triggered if the virtual memory pages for a ring segment are
next to each other in increasing order where the ring buffer wraps
around and causes errors like:

[  106.398223] xhci_hcd 0000:00:14.0: ERROR Transfer event TRB DMA ptr not part of current TD ep_index 0 comp_code 1
[  106.398230] xhci_hcd 0000:00:14.0: Looking for event-dma fffd3000 trb-start fffd4fd0 trb-end fffd5000 seg-start fffd4000 seg-end fffd4ff0

The trb-end address is one outside the end-seg address.

Tested-by: Arkadiusz Miśkiewicz
Signed-off-by: Mathias Nyman
Signed-off-by: Greg Kroah-Hartman
---
 drivers/usb/host/xhci-ring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 95fe1a432d29..fde0277adc2c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -85,7 +85,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		return 0;
 	/* offset in TRBs */
 	segment_offset = trb - seg->trbs;
-	if (segment_offset > TRBS_PER_SEGMENT)
+	if (segment_offset >= TRBS_PER_SEGMENT)
 		return 0;
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
-- cgit v1.2.3

From e850ac8e566a530291d3c6d023030b3ea4a5d48e Mon Sep 17 00:00:00 2001
From: Dirk Behme
Date: Mon, 27 Jul 2015 08:56:05 +0200
Subject: USB: sierra: add 1199:68AB device ID

commit 74472233233f577eaa0ca6d6e17d9017b6e53150 upstream.

Add support for the Sierra Wireless AR8550 device with USB descriptor
0x1199, 0x68AB.

It is common with MC879x modules 1199:683c/683d which also are composite
devices with 7 interfaces (0..6) and also MDM62xx based as the AR8550.

The major difference are only the interface attributes 02/02/01 on
interfaces 3 and 4 on the AR8550. They are vendor specific ff/ff/ff on
MC879x modules.

lsusb reports:

Bus 001 Device 004: ID 1199:68ab Sierra Wireless, Inc.
Device Descriptor:
  bLength 18, bDescriptorType 1, bcdUSB 2.00,
  bDeviceClass 0 (Defined at Interface level), bDeviceSubClass 0, bDeviceProtocol 0,
  bMaxPacketSize0 64, idVendor 0x1199 Sierra Wireless, Inc., idProduct 0x68ab,
  bcdDevice 0.06, iManufacturer 3 Sierra Wireless, Incorporated, iProduct 2 AR8550,
  iSerial 0, bNumConfigurations 1
Configuration Descriptor:
  bLength 9, bDescriptorType 2, wTotalLength 198, bNumInterfaces 7,
  bConfigurationValue 1, iConfiguration 1 Sierra Configuration,
  bmAttributes 0xe0 (Self Powered, Remote Wakeup), MaxPower 0mA
Interface 0: 2 endpoints, class 255/255/255 (Vendor Specific)
  EP 0x81 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x01 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 1: 2 endpoints, class 255/255/255 (Vendor Specific)
  EP 0x82 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x02 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 2: 2 endpoints, class 255/255/255 (Vendor Specific)
  EP 0x83 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x03 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 3: 3 endpoints, class 2/2/1 (Communications, Abstract (modem), AT-commands (v.25ter))
  EP 0x84 IN  Interrupt, wMaxPacketSize 0x0040 (1x 64 bytes), bInterval 5
  EP 0x85 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x04 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 4: 3 endpoints, class 2/2/1 (Communications, Abstract (modem), AT-commands (v.25ter))
  EP 0x86 IN  Interrupt, wMaxPacketSize 0x0040 (1x 64 bytes), bInterval 5
  EP 0x87 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x05 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 5: 3 endpoints, class 255/255/255 (Vendor Specific)
  EP 0x88 IN  Interrupt, wMaxPacketSize 0x0040 (1x 64 bytes), bInterval 5
  EP 0x89 IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x06 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
Interface 6: 3 endpoints, class 255/255/255 (Vendor Specific)
  EP 0x8a IN  Interrupt, wMaxPacketSize 0x0040 (1x 64 bytes), bInterval 5
  EP 0x8b IN  Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
  EP 0x07 OUT Bulk, wMaxPacketSize 0x0200 (1x 512 bytes), bInterval 32
(All interfaces: bLength 9, bDescriptorType 4, bAlternateSetting 0, iInterface 0;
 all endpoints: bLength 7, bDescriptorType 5, Synch Type None, Usage Type Data.)
Device Qualifier (for other device speed):
  bLength 10, bDescriptorType 6, bcdUSB 2.00, bDeviceClass 0 (Defined at Interface level),
  bDeviceSubClass 0, bDeviceProtocol 0, bMaxPacketSize0 64, bNumConfigurations 1
Device Status: 0x0001 (Self Powered)

Signed-off-by: Dirk Behme
Cc: Lars Melin
Signed-off-by: Johan Hovold
Signed-off-by: Greg Kroah-Hartman
---
 drivers/usb/serial/sierra.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 5aaa2b675116..af9f82a1fcde 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
 	/* AT&T Direct IP LTE modems */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
-- cgit v1.2.3

From 21c7d3807a429ba6c606e30087d785c0ef3d6288 Mon Sep 17 00:00:00 2001
From: Benjamin Randazzo
Date: Sat, 25 Jul 2015 16:36:50 +0200
Subject: md: use kzalloc() when bitmap is disabled

commit b6878d9e03043695dbf3fa1caa6dfc09db225b16 upstream.

In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".

5769         file = kmalloc(sizeof(*file), GFP_NOIO);
5770         if (!file)
5771                 return -ENOMEM;

This structure is copied to user space at the end of the function.

5786         if (err == 0 &&
5787             copy_to_user(arg, file, sizeof(*file)))
5788                 err = -EFAULT

But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.

5775         /* bitmap disabled, zero the first byte and copy out */
5776         if (!mddev->bitmap_info.file)
5777                 file->pathname[0] = '\0';

Signed-off-by: Benjamin Randazzo
Signed-off-by: NeilBrown
Signed-off-by: Greg Kroah-Hartman
---
 drivers/md/md.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 631fe3e9c6e5..37ff00d014b4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5628,9 +5628,9 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
 	int err = -ENOMEM;

 	if (md_allow_write(mddev))
-		file = kmalloc(sizeof(*file), GFP_NOIO);
+		file = kzalloc(sizeof(*file), GFP_NOIO);
 	else
-		file = kmalloc(sizeof(*file), GFP_KERNEL);
+		file = kzalloc(sizeof(*file), GFP_KERNEL);

 	if (!file)
 		goto out;
-- cgit v1.2.3

From 471bfba87fb46dad486fe0b9aa4a14c2038383b7 Mon Sep 17 00:00:00 2001
From: Xie XiuQi
Date: Fri, 24 Jan 2014 14:00:52 -0600
Subject: ipmi: fix timeout calculation when bmc is disconnected

commit e21404dc0ac7ac971c1e36274b48bb460463f4e5 upstream.

Loading ipmi_si module while bmc is disconnected, we found the timeout
is longer than 5 secs. Actually it takes about 3 mins and 20 secs.
(HZ=250)

error message as below:

Dec 12 19:08:59 linux kernel: IPMI BT: timeout in RD_WAIT [ ] 1 retries left
Dec 12 19:08:59 linux kernel: BT: write 4 bytes seq=0x01 03 18 00 01
[...]
Dec 12 19:12:19 linux kernel: IPMI BT: timeout in RD_WAIT [ ]
Dec 12 19:12:19 linux kernel: failed 2 retries, sending error response
Dec 12 19:12:19 linux kernel: IPMI: BT reset (takes 5 secs)
Dec 12 19:12:19 linux kernel: IPMI BT: flag reset [ ]

Function wait_for_msg_done() use schedule_timeout_uninterruptible(1) to
sleep 1 tick, so we should subtract jiffies_to_usecs(1) instead of 100
usecs from timeout.

Reported-by: Hu Shiyuan
Signed-off-by: Xie XiuQi
Signed-off-by: Corey Minyard
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 drivers/char/ipmi/ipmi_si_intf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 40b3f756f904..02cc352d8bcc 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2717,7 +2717,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
 		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
 			schedule_timeout_uninterruptible(1);
 			smi_result = smi_info->handlers->event(
-				smi_info->si_sm, 100);
+				smi_info->si_sm, jiffies_to_usecs(1));
 		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
 			smi_result = smi_info->handlers->event(
 				smi_info->si_sm, 0);
-- cgit v1.2.3
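
The numbers in the ipmi report line up with that analysis: with HZ=250 each
schedule_timeout_uninterruptible(1) sleeps about 4000 us of wall-clock time,
but only 100 us was deducted from the state machine's timeout budget, so a
nominal 5 second timeout stretches by a factor of 40 to roughly 200 seconds
(3 min 20 s). A small stand-alone sketch of the arithmetic; the 5 s budget is
an assumption for illustration, not a value taken from the driver:

/* build: cc -o ipmi_timeout ipmi_timeout.c && ./ipmi_timeout */
#include <stdio.h>

int main(void)
{
	const long HZ = 250;                       /* from the report above          */
	const long tick_usecs = 1000000 / HZ;      /* real time slept per tick: 4000 */
	const long timeout_usecs = 5 * 1000000;    /* assumed 5 s budget, in usecs   */

	/* Old code deducted a flat 100 us per 4000 us tick, draining ~40x too slowly. */
	long ticks_old = timeout_usecs / 100;
	long ticks_new = timeout_usecs / tick_usecs;

	printf("old: %ld ticks -> %ld s of real time (~3 min 20 s)\n",
	       ticks_old, ticks_old * tick_usecs / 1000000);
	printf("new: %ld ticks -> %ld s of real time\n",
	       ticks_new, ticks_new * tick_usecs / 1000000);
	return 0;
}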
From 14c99cd5cdd34ee464eeb1fc9f0d560672a4f2ec Mon Sep 17 00:00:00 2001
From: Guenter Roeck
Date: Sun, 8 Sep 2013 00:25:36 -0700
Subject: mfd: sm501: dbg_regs attribute must be read-only

commit 8a8320c2e78d1b619a8fa8eb5ae946b8691de604 upstream.

Fix:

sm501 sm501: SM501 At b3e00000: Version 050100a0, 8 Mb, IRQ 100
Attribute dbg_regs: write permission without 'store'
------------[ cut here ]------------
WARNING: at drivers/base/core.c:620

dbg_regs does not have a write function and must therefore be marked
as read-only.

Signed-off-by: Guenter Roeck
Signed-off-by: Lee Jones
Signed-off-by: Greg Kroah-Hartman
---
 drivers/mfd/sm501.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 9816c232e583..c04e08d1d0fa 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
 }


-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
+static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);

 /* sm501_init_reg
  *
-- cgit v1.2.3

From 292f53675e097ca807df744fb6fd39211a134bf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Fri, 26 Jun 2015 03:28:24 +0200
Subject: xen/gntdevt: Fix race condition in gntdev_release()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 30b03d05e07467b8c6ec683ea96b5bffcbcd3931 upstream.

While gntdev_release() is called the MMU notifier is still registered
and can traverse priv->maps list even if no pages are mapped (which is
the case -- gntdev_release() is called after all). But gntdev_release()
will clear that list, so make sure that only one of those things
happens at the same time.

Signed-off-by: Marek Marczykowski-Górecki
Signed-off-by: David Vrabel
Signed-off-by: Greg Kroah-Hartman
---
 drivers/xen/gntdev.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 3c8803feba26..474d11499d0e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -534,12 +534,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)

 	pr_debug("priv %p\n", priv);

+	mutex_lock(&priv->lock);
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
 		gntdev_put_map(NULL /* already removed */, map);
 	}
 	WARN_ON(!list_empty(&priv->freeable_maps));
+	mutex_unlock(&priv->lock);

 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
-- cgit v1.2.3

From d3646ba72fd7292e9166866aed65e6e8de4b6440 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Wed, 22 Jul 2015 18:05:35 +0800
Subject: crypto: ixp4xx - Remove bogus BUG_ON on scattered dst buffer

commit f898c522f0e9ac9f3177d0762b76e2ab2d2cf9c0 upstream.

This patch removes a bogus BUG_ON in the ablkcipher path that triggers
when the destination buffer is different from the source buffer and is
scattered.

Signed-off-by: Herbert Xu
Signed-off-by: Greg Kroah-Hartman
---
 drivers/crypto/ixp4xx_crypto.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6e..7cb51b3bb79e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
 		/* This was never tested by Intel
 		 * for more than one dst buffer, I think. */
-		BUG_ON(req->dst->length < nbytes);
 		req_ctx->dst = NULL;
 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 					flags, DMA_FROM_DEVICE))
-- cgit v1.2.3

From dff252b8499f7f7cb4353d5c64ed5dbc13b7daa0 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Thu, 16 Jul 2015 17:36:11 +0300
Subject: rbd: fix copyup completion race

commit 2761713d35e370fd640b5781109f753066b746c4 upstream.

For write/discard obj_requests that involved a copyup method call, the
opcode of the first op is CEPH_OSD_OP_CALL and the ->callback is
rbd_img_obj_copyup_callback().  The latter frees copyup pages, sets
->xferred and delegates to rbd_img_obj_callback(), the "normal" image
object callback, for reporting to block layer and putting refs.

rbd_osd_req_callback() however treats CEPH_OSD_OP_CALL as a trivial op,
which means obj_request is marked done in rbd_osd_trivial_callback(),
*before* ->callback is invoked and rbd_img_obj_copyup_callback() has
a chance to run.  Marking obj_request done essentially means giving
rbd_img_obj_callback() a license to end it at any moment, so if another
obj_request from the same img_request is being completed concurrently,
rbd_img_obj_end_request() may very well be called on such prematurely
marked done request:

handle_reply()
  rbd_osd_req_callback()
    rbd_osd_trivial_callback()
    rbd_obj_request_complete()
    rbd_img_obj_copyup_callback()
      rbd_img_obj_callback()
                                  handle_reply()
                                    rbd_osd_req_callback()
                                      rbd_osd_trivial_callback()
      for_each_obj_request(obj_request->img_request) {
        rbd_img_obj_end_request(obj_request-1/2)
        rbd_img_obj_end_request(obj_request-2/2) <--
      }

Calling rbd_img_obj_end_request() on such a request leads to trouble,
in particular because its ->xferred is 0.  We report 0 to the block
layer with blk_update_request(), get back 1 for "this request has more
data in flight" and then trip on

    rbd_assert(more ^ (which == img_request->obj_request_count));

with rhs (which == ...) being 1 because rbd_img_obj_end_request() has
been called for both requests and lhs (more) being 1 because we haven't
got a chance to set ->xferred in rbd_img_obj_copyup_callback() yet.

To fix this, leverage that rbd wants to call class methods in only two
cases: one is a generic method call wrapper (obj_request is standalone)
and the other is a copyup (obj_request is part of an img_request).  So
make a dedicated handler for CEPH_OSD_OP_CALL and directly invoke
rbd_img_obj_copyup_callback() from it if obj_request is part of an
img_request, similar to how CEPH_OSD_OP_READ handler invokes
rbd_img_obj_request_read_callback().

Since rbd_img_obj_copyup_callback() is now being called from the OSD
request callback (only), it is renamed to rbd_osd_copyup_callback().

Cc: Alex Elder
Signed-off-by: Ilya Dryomov
Reviewed-by: Alex Elder
Signed-off-by: Greg Kroah-Hartman
---
 drivers/block/rbd.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f78cbbb88bd4..01677543248d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -457,6 +457,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)	((void) 0)
 #endif /* !RBD_DEBUG */

+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1670,6 +1671,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
 	obj_request_done_set(obj_request);
 }

+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+	dout("%s: obj %p\n", __func__, obj_request);
+
+	if (obj_request_img_data_test(obj_request))
+		rbd_osd_copyup_callback(obj_request);
+	else
+		obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 				struct ceph_msg *msg)
 {
@@ -1708,6 +1719,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 		rbd_osd_stat_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_CALL:
+		rbd_osd_call_callback(obj_request);
+		break;
 	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
 		rbd_osd_trivial_callback(obj_request);
@@ -2305,13 +2318,15 @@ out_unwind:
 }

 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request;
 	struct rbd_device *rbd_dev;
 	struct page **pages;
 	u32 page_count;

+	dout("%s: obj %p\n", __func__, obj_request);
+
 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
 	rbd_assert(obj_request_img_data_test(obj_request));
 	img_request = obj_request->img_request;
@@ -2337,9 +2352,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 	if (!obj_request->result)
 		obj_request->xferred = obj_request->length;

-	/* Finish up with the normal image object callback */
-
-	rbd_img_obj_callback(obj_request);
+	obj_request_done_set(obj_request);
 }

 static void
@@ -2436,7 +2449,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)

 	/* All set, send it off. */

-	orig_request->callback = rbd_img_obj_copyup_callback;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	img_result = rbd_obj_request_submit(osdc, orig_request);
 	if (!img_result)
-- cgit v1.2.3
From 621468a3d79c2ef11ef2ec2eeb1038b8c4373b7d Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger
Date: Wed, 22 Jul 2015 23:14:19 -0700
Subject: iscsi-target: Fix iscsit_start_kthreads failure OOPs

commit e54198657b65625085834847ab6271087323ffea upstream.

This patch fixes a regression introduced with the following commit in
v4.0-rc1 code, where a iscsit_start_kthreads() failure triggers a NULL
pointer dereference OOPs:

    commit 88dcd2dab5c23b1c9cfc396246d8f476c872f0ca
    Author: Nicholas Bellinger
    Date:   Thu Feb 26 22:19:15 2015 -0800

        iscsi-target: Convert iscsi_thread_set usage to kthread.h

To address this bug, move iscsit_start_kthreads() immediately
preceding the transmit of the last login response, before signaling a
successful transition into full-feature-phase within existing
iscsi_target_do_tx_login_io() logic.

This ensures that no target-side resource allocation failures can occur
after the final login response has been successfully sent.

Also, it adds a iscsi_conn->rx_login_comp to allow the RX thread to
sleep to prevent other socket related failures until the final
iscsi_post_login_handler() call is able to complete.

Cc: Sagi Grimberg
Signed-off-by: Nicholas Bellinger
Signed-off-by: Nicholas Bellinger
Signed-off-by: Greg Kroah-Hartman
---
 drivers/target/iscsi/iscsi_target.c       | 18 ++++++++++---
 drivers/target/iscsi/iscsi_target_core.h  |  1 +
 drivers/target/iscsi/iscsi_target_login.c | 43 ++++++++++++-------------------
 drivers/target/iscsi/iscsi_target_login.h |  1 +
 drivers/target/iscsi/iscsi_target_nego.c  | 34 +++++++++++++++++++++++-
 5 files changed, 66 insertions(+), 31 deletions(-)

diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index efca110342cb..06cd916f91fe 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3874,7 +3874,13 @@ get_immediate:
 	}

 transport_err:
-	iscsit_take_action_for_connection_exit(conn);
+	/*
+	 * Avoid the normal connection failure code-path if this connection
+	 * is still within LOGIN mode, and iscsi_np process context is
+	 * responsible for cleaning up the early connection failure.
+	 */
+	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+		iscsit_take_action_for_connection_exit(conn);
 out:
 	return 0;
 }
@@ -3956,7 +3962,7 @@ reject:

 int iscsi_target_rx_thread(void *arg)
 {
-	int ret;
+	int ret, rc;
 	u8 buffer[ISCSI_HDR_LEN], opcode;
 	u32 checksum = 0, digest = 0;
 	struct iscsi_conn *conn = arg;
@@ -3966,10 +3972,16 @@ int iscsi_target_rx_thread(void *arg)
 	 * connection recovery / failure event can be triggered externally.
 	 */
 	allow_signal(SIGINT);
+	/*
+	 * Wait for iscsi_post_login_handler() to complete before allowing
+	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+	 */
+	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+	if (rc < 0)
+		return 0;

 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
 		struct completion comp;
-		int rc;

 		init_completion(&comp);
 		rc = wait_for_completion_interruptible(&comp);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 815bf5b1a4ae..bf93e1c1ff97 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -589,6 +589,7 @@ struct iscsi_conn {
 	int			bitmap_id;
 	int			rx_thread_active;
 	struct task_struct	*rx_thread;
+	struct completion	rx_login_comp;
 	int			tx_thread_active;
 	struct task_struct	*tx_thread;
 	/* list_head for session connection list */
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 797b2e2acc35..2c4db62e327e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -84,6 +84,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
 	init_completion(&conn->conn_logout_comp);
 	init_completion(&conn->rx_half_close_comp);
 	init_completion(&conn->tx_half_close_comp);
+	init_completion(&conn->rx_login_comp);
 	spin_lock_init(&conn->cmd_lock);
 	spin_lock_init(&conn->conn_usage_lock);
 	spin_lock_init(&conn->immed_queue_lock);
@@ -718,6 +719,7 @@ int iscsit_start_kthreads(struct iscsi_conn *conn)

 	return 0;
 out_tx:
+	send_sig(SIGINT, conn->tx_thread, 1);
 	kthread_stop(conn->tx_thread);
 	conn->tx_thread_active = false;
 out_bitmap:
@@ -728,7 +730,7 @@ out_bitmap:
 	return ret;
 }

-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
 	struct iscsi_np *np,
 	struct iscsi_conn *conn,
 	u8 zero_tsih)
@@ -738,7 +740,6 @@ int iscsi_post_login_handler(
 	struct se_session *se_sess = sess->se_sess;
 	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-	int rc;

 	iscsit_inc_conn_usage_count(conn);

@@ -779,10 +780,6 @@ int iscsi_post_login_handler(
 			sess->sess_ops->InitiatorName);
 		spin_unlock_bh(&sess->conn_lock);

-		rc = iscsit_start_kthreads(conn);
-		if (rc)
-			return rc;
-
 		iscsi_post_login_start_timers(conn);
 		/*
 		 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -791,15 +788,20 @@ int iscsi_post_login_handler(
 		iscsit_thread_get_cpumask(conn);
 		conn->conn_rx_reset_cpumask = 1;
 		conn->conn_tx_reset_cpumask = 1;
-
+		/*
+		 * Wakeup the sleeping iscsi_target_rx_thread() now that
+		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+		 */
+		complete(&conn->rx_login_comp);
 		iscsit_dec_conn_usage_count(conn);
+
 		if (stop_timer) {
 			spin_lock_bh(&se_tpg->session_lock);
 			iscsit_stop_time2retain_timer(sess);
 			spin_unlock_bh(&se_tpg->session_lock);
 		}
 		iscsit_dec_session_usage_count(sess);
-		return 0;
+		return;
 	}

 	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -840,10 +842,6 @@ int iscsi_post_login_handler(
 		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
 	spin_unlock_bh(&se_tpg->session_lock);

-	rc = iscsit_start_kthreads(conn);
-	if (rc)
-		return rc;
-
 	iscsi_post_login_start_timers(conn);
 	/*
 	 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -852,10 +850,12 @@ int iscsi_post_login_handler(
 	iscsit_thread_get_cpumask(conn);
 	conn->conn_rx_reset_cpumask = 1;
 	conn->conn_tx_reset_cpumask = 1;
-
+	/*
+	 * Wakeup the sleeping iscsi_target_rx_thread() now that
+	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+	 */
+	complete(&conn->rx_login_comp);
 	iscsit_dec_conn_usage_count(conn);
-
-	return 0;
 }

 static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1331,20 +1331,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	if (iscsi_target_start_negotiation(login, conn) < 0)
 		goto new_sess_out;

-	if (!conn->sess) {
-		pr_err("struct iscsi_conn session pointer is NULL!\n");
-		goto new_sess_out;
-	}
-
 	iscsi_stop_login_thread_timer(np);

-	if (signal_pending(current))
-		goto new_sess_out;
-
-	ret = iscsi_post_login_handler(np, conn, zero_tsih);
-
-	if (ret < 0)
-		goto new_sess_out;
+	iscsi_post_login_handler(np, conn, zero_tsih);

 	iscsit_deaccess_np(np, tpg);
 	tpg = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 63efd2878451..6d7eb66de94b 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,6 +12,7 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
 extern int iscsi_target_login_thread(void *);
 extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);

diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 72d9dec991c0..77c276acccb6 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -19,6 +19,7 @@
  ******************************************************************************/

 #include <linux/ctype.h>
+#include <linux/kthread.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -352,10 +353,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
 		ntohl(login_rsp->statsn), login->rsp_length);

 	padding = ((-login->rsp_length) & 3);
+	/*
+	 * Before sending the last login response containing the transition
+	 * bit for full-feature-phase, go ahead and start up TX/RX threads
+	 * now to avoid potential resource allocation failures after the
+	 * final login response has been sent.
+	 */
+	if (login->login_complete) {
+		int rc = iscsit_start_kthreads(conn);
+		if (rc) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+	}

 	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
 					login->rsp_length + padding) < 0)
-		return -1;
+		goto err;

 	login->rsp_length = 0;
 	mutex_lock(&sess->cmdsn_mutex);
@@ -364,6 +379,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
 	mutex_unlock(&sess->cmdsn_mutex);

 	return 0;
+
+err:
+	if (login->login_complete) {
+		if (conn->rx_thread && conn->rx_thread_active) {
+			send_sig(SIGINT, conn->rx_thread, 1);
+			kthread_stop(conn->rx_thread);
+		}
+		if (conn->tx_thread && conn->tx_thread_active) {
+			send_sig(SIGINT, conn->tx_thread, 1);
+			kthread_stop(conn->tx_thread);
+		}
+		spin_lock(&iscsit_global->ts_bitmap_lock);
+		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+				      get_order(1));
+		spin_unlock(&iscsit_global->ts_bitmap_lock);
+	}
+	return -1;
 }

 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
-- cgit v1.2.3

From c4a6d3f3491a55269ee1e8d99f6fa7bab15cc011 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Mon, 27 Jul 2015 11:48:52 +1000
Subject: md/raid1: extend spinlock to protect raid1_end_read_request against
 inconsistencies

commit 423f04d63cf421ea436bcc5be02543d549ce4b28 upstream.

raid1_end_read_request() assumes that the In_sync bits are consistent
with the ->degraded count.

raid1_spare_active updates the In_sync bit before the ->degraded count
and so exposes an inconsistency, as does error().

So extend the spinlock in raid1_spare_active() and error() to hide those
inconsistencies.

This should probably be part of
Commit: 34cab6f42003 ("md/raid1: fix test for 'was read error from
last working device'.")
as it addresses the same issue.  It fixes the same bug and should go
to -stable for same reasons.

Fixes: 76073054c95b ("md/raid1: clean up read_balance.")
Signed-off-by: NeilBrown
Signed-off-by: Greg Kroah-Hartman
---
 drivers/md/raid1.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fa58438b298a..72141ee60705 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1382,6 +1382,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;

 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1401,14 +1402,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1466,7 +1466,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1496,7 +1499,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
-- cgit v1.2.3

From 4d0dd4350a4107265fd8701d4b26fdc033ad28cc Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 21 Mar 2015 20:08:18 -0400
Subject: sg_start_req(): make sure that there's not too many elements in iovec

commit 451a2886b6bf90e2fb378f7c46c655450fb96e81 upstream.

unfortunately, allowing an arbitrary 16bit value means a possibility of
overflow in the calculation of total number of pages in
bio_map_user_iov() - we rely on there being no more than PAGE_SIZE
members of sum in the first loop there.  If that sum wraps around, we
end up allocating too small array of pointers to pages and it's easy to
overflow it in the second loop.

X-Coverup: TINC (and there's no lumber cartel either)
Signed-off-by: Al Viro
[bwh: s/MAX_UIOVEC/UIO_MAXIOV/. This was fixed upstream by commit
 fdc81f45e9f5 ("sg_start_req(): use import_iovec()"), but we don't have
 that function.]
Signed-off-by: Greg Kroah-Hartman
---
 drivers/scsi/sg.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index eb81c98386b9..721d839d6c54 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
 		md->from_user = 0;
 	}

+	if (unlikely(iov_count > UIO_MAXIOV))
+		return -EINVAL;
+
 	if (iov_count) {
 		int len, size = sizeof(struct sg_iovec) * iov_count;
 		struct iovec *iov;
-- cgit v1.2.3

From bc0a524cd874cb33146b641c4c8152a22b7da070 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Fri, 14 Aug 2015 17:04:21 +1000
Subject: md/bitmap: return an error when bitmap superblock is corrupt.

commit b97e92574c0bf335db1cd2ec491d8ff5cd5d0b49 upstream
    Use separate bitmaps for each nodes in the cluster

bitmap_read_sb() validates the bitmap superblock that it reads in.
If it finds an inconsistency like a bad magic number or out-of-range
version number, it prints an error and returns, but it incorrectly
returns zero, so the array is still assembled with the (invalid) bitmap.

This means it could try to use a bitmap with a new version number which
it therefore does not understand.

This bug was introduced in 3.5 and fixed as part of a larger patch in 4.1.
So the patch is suitable for any -stable kernel in that range.

Fixes: 27581e5ae01f ("md/bitmap: centralise allocation of bitmap file pages.")
Signed-off-by: NeilBrown
Reported-by: GuoQing Jiang
---
 drivers/md/bitmap.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a79cbd6038f6..37470ee7c850 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	if (err)
 		return err;

+	err = -EINVAL;
+
 	sb = kmap_atomic(sb_page);

 	chunksize = le32_to_cpu(sb->chunksize);
-- cgit v1.2.3

From 9cdd5586f1e836ffd877c79c74240ee46fea5bd3 Mon Sep 17 00:00:00 2001
From: Joe Thornber
Date: Wed, 12 Aug 2015 15:10:21 +0100
Subject: dm thin metadata: delete btrees when releasing metadata snapshot

commit 7f518ad0a212e2a6fd68630e176af1de395070a7 upstream.

The device details and mapping trees were just being decremented
before.  Now btree_del() is called to do a deep delete.

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
Signed-off-by: Greg Kroah-Hartman
---
 drivers/md/dm-thin-metadata.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 3b1503dc1f13..43f6250baadd 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1281,8 +1281,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
 		return r;

 	disk_super = dm_block_data(copy);
-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
 	dm_sm_dec_block(pmd->metadata_sm, held_root);

 	return dm_tm_unlock(pmd->tm, copy);
-- cgit v1.2.3

From 94c379756b6749d7641f883fa5307abe9da3964c Mon Sep 17 00:00:00 2001
From: Michael Walle
Date: Tue, 21 Jul 2015 11:00:53 +0200
Subject: EDAC, ppc4xx: Access mci->csrows array elements properly

commit 5c16179b550b9fd8114637a56b153c9768ea06a5 upstream.

The commit de3910eb79ac ("edac: change the mem allocation scheme to make
Documentation/kobject.txt happy") changed the memory allocation for the
csrows member. But ppc4xx_edac was forgotten in the patch. Fix it.

Signed-off-by: Michael Walle
Cc: linux-edac
Cc: Mauro Carvalho Chehab
Link: http://lkml.kernel.org/r/1437469253-8611-1-git-send-email-michael@walle.cc
Signed-off-by: Borislav Petkov
Signed-off-by: Greg Kroah-Hartman
---
 drivers/edac/ppc4xx_edac.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index ef6b7e08f485..5c361f3c66aa 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
 	 */

 	for (row = 0; row < mci->nr_csrows; row++) {
-		struct csrow_info *csi = &mci->csrows[row];
+		struct csrow_info *csi = mci->csrows[row];

 		/*
 		 * Get the configuration settings for this
-- cgit v1.2.3
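
The one-character EDAC change above is easy to misread: after the referenced
allocation-scheme commit, csrows is an array of pointers, so an element is
already a pointer and taking its address yields a pointer-to-pointer. A small
stand-alone C sketch of the two layouts, using hypothetical struct names
rather than the EDAC ones:

#include <stdio.h>
#include <stdlib.h>

struct row_info { int nr_channels; };

int main(void)
{
	/* Old scheme: rows embedded in one flat array. */
	struct row_info embedded[2] = { { 1 }, { 2 } };
	struct row_info *old_style = &embedded[1];      /* address-of is needed    */

	/* New scheme: an array of separately allocated rows. */
	struct row_info *rows[2];
	for (int i = 0; i < 2; i++) {
		rows[i] = calloc(1, sizeof(*rows[i]));
		rows[i]->nr_channels = i + 1;
	}
	struct row_info *new_style = rows[1];           /* the element is the pointer */
	/* '&rows[1]' would have type struct row_info ** -- the ppc4xx_edac mistake. */

	printf("%d %d\n", old_style->nr_channels, new_style->nr_channels);

	for (int i = 0; i < 2; i++)
		free(rows[i]);
	return 0;
}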
From 0a1b726993ae45ffda33dde0a5760926f682faf8 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Fri, 5 Jun 2015 14:20:51 -0700
Subject: libfc: Fix fc_fcp_cleanup_each_cmd()

commit 8f2777f53e3d5ad8ef2a176a4463a5c8e1a16431 upstream.

Since fc_fcp_cleanup_cmd() can sleep this function must not be called
while holding a spinlock. This patch avoids that fc_fcp_cleanup_each_cmd()
triggers the following bug:

BUG: scheduling while atomic: sg_reset/1512/0x00000202
1 lock held by sg_reset/1512:
 #0:  (&(&fsp->scsi_pkt_lock)->rlock){+.-...}, at: [] fc_fcp_cleanup_each_cmd.isra.21+0xa5/0x150 [libfc]
Preemption disabled at: [] fc_fcp_cleanup_each_cmd.isra.21+0xa5/0x150 [libfc]
Call Trace:
 [] dump_stack+0x4f/0x7b
 [] __schedule_bug+0x6c/0xd0
 [] __schedule+0x71a/0xa10
 [] schedule+0x32/0x80
 [] fc_seq_set_resp+0xac/0x100 [libfc]
 [] fc_exch_done+0x41/0x60 [libfc]
 [] fc_fcp_cleanup_each_cmd.isra.21+0xcf/0x150 [libfc]
 [] fc_eh_device_reset+0x1c3/0x270 [libfc]
 [] scsi_try_bus_device_reset+0x29/0x60
 [] scsi_ioctl_reset+0x258/0x2d0
 [] scsi_ioctl+0x150/0x440
 [] sd_ioctl+0xad/0x120
 [] blkdev_ioctl+0x1b6/0x810
 [] block_ioctl+0x38/0x40
 [] do_vfs_ioctl+0x2f8/0x530
 [] SyS_ioctl+0x81/0xa0
 [] system_call_fastpath+0x16/0x7a

Signed-off-by: Bart Van Assche
Signed-off-by: Vasu Dev
Signed-off-by: James Bottomley
Signed-off-by: Greg Kroah-Hartman
---
 drivers/scsi/libfc/fc_fcp.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 09c81b2f2169..42c46dc19537 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
 		fc_fcp_pkt_hold(fsp);
 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

-		if (!fc_fcp_lock_pkt(fsp)) {
+		spin_lock_bh(&fsp->scsi_pkt_lock);
+		if (!(fsp->state & FC_SRB_COMPL)) {
+			fsp->state |= FC_SRB_COMPL;
+			/*
+			 * TODO: dropping scsi_pkt_lock and then reacquiring
+			 * again around fc_fcp_cleanup_cmd() is required,
+			 * since fc_fcp_cleanup_cmd() calls into
+			 * fc_seq_set_resp() and that func preempts cpu using
+			 * schedule. May be schedule and related code should be
+			 * removed instead of unlocking here to avoid scheduling
+			 * while atomic bug.
+			 */
+			spin_unlock_bh(&fsp->scsi_pkt_lock);
+
 			fc_fcp_cleanup_cmd(fsp, error);
+
+			spin_lock_bh(&fsp->scsi_pkt_lock);
 			fc_io_compl(fsp);
-			fc_fcp_unlock_pkt(fsp);
 		}
+		spin_unlock_bh(&fsp->scsi_pkt_lock);

 		fc_fcp_pkt_release(fsp);
 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
-- cgit v1.2.3

From d51689e20adbf8fe2c3999939da8d61ed2ccbc58 Mon Sep 17 00:00:00 2001
From: Horia Geantă
Date: Tue, 11 Aug 2015 20:19:20 +0300
Subject: crypto: caam - fix memory corruption in ahash_final_ctx

commit b310c178e6d897f82abb9da3af1cd7c02b09f592 upstream.

When doing pointer operation for accessing the HW S/G table, a value
representing number of entries (and not number of bytes) must be used.

Fixes: 045e36780f115 ("crypto: caam - ahash hmac support")
Signed-off-by: Horia Geantă
Signed-off-by: Herbert Xu
Signed-off-by: Greg Kroah-Hartman
---
 drivers/crypto/caam/caamhash.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 84573b4d6f92..dda43cc4b6cd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -895,13 +895,14 @@ static int ahash_final_ctx(struct ahash_request *req)
 			 state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;

-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -928,7 +929,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
-- cgit v1.2.3
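
The caam fix above comes down to C pointer arithmetic: for a typed pointer
such as 'struct sec4_sg_entry *', adding n advances by n entries, so adding a
byte count scales by sizeof(entry) a second time and lands far past the table.
A small stand-alone sketch with a made-up entry type (not the caam struct):

/* build: cc -o sg_index sg_index.c && ./sg_index */
#include <stdio.h>

struct entry { unsigned long addr; unsigned int len; unsigned int flags; };

int main(void)
{
	struct entry table[4] = { 0 };
	int nentries = 2;
	int nbytes = nentries * (int)sizeof(struct entry);

	/* Correct: index the last used element by entry count. */
	table[nentries - 1].len |= 0x40000000u;

	/* The old code effectively indexed element (nbytes - 1) instead: */
	printf("intended element index: %d\n", nentries - 1);
	printf("byte-count element index: %d (%d bytes past the table start)\n",
	       nbytes - 1, (nbytes - 1) * (int)sizeof(struct entry));
	/* Writing ->len at that index would corrupt whatever follows the table,
	 * which is the memory corruption the patch title refers to. */
	return 0;
}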