commit    053bd5d2ca3d60db871363d490c89e971ec0e54a
tree      77b0d0c9d3531bfd07865855e4d8ce86caac5b23
parent    3b9074bfa0239bef9648cd6a237b9d3f757c4536
parent    31a112b8d616397f187c49c6b63d11256f1e03fa
author    Takashi Iwai <tiwai@suse.de>  2018-08-08 21:31:27 +0200
committer Takashi Iwai <tiwai@suse.de>  2018-08-08 21:31:27 +0200
Merge branch 'SLE15' into openSUSE-15.0
Conflicts:
config/ppc64le/vanilla
config/s390x/default
config/s390x/vanilla
config/x86_64/vanilla
suse-commit: 942604c436ef6c88284d73923a9d84c28502de0f
73 files changed, 3145 insertions, 1674 deletions
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index 2a42a323fa1a..a5b7b843c257 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -50,6 +50,13 @@ Main node required properties:
 Optional main node properties:
  - hip06-sas-v2-quirk-amt : when set, indicates that the v2 controller has
    the "am-max-transmissions" limitation.
+ - hisilicon,signal-attenuation : array of 3 32-bit values, containing de-emphasis,
+   preshoot, and boost attenuation readings for the board. They
+   are used to describe the signal attenuation of the board. These
+   values' range is 7600 to 12400, and used to represent -24dB to
+   24dB.
+   The formula is "y = (x-10000)/10000". For example, 10478
+   means 4.78dB.
 
 Example:
 	sas0: sas@c1000000 {
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 70474bd97a10..07c8f701e51c 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -266,6 +266,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
 	if (ret) {
 		kfree(gck);
 		hw = ERR_PTR(ret);
+	}
 
 	return hw;
 }
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 1bd6f4497472..44fd5f5ae0bc 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -72,8 +72,8 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 #endif
 
 /**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @sb: The superblock of the device
+ * ____bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @bdev: block device to check
  * @blocksize: The block size of the device
  *
  * This is a library function for filesystems to check if the block device
@@ -81,9 +81,9 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
  *
  * Return: negative errno if unsupported, 0 if supported.
  */
-int __bdev_dax_supported(struct super_block *sb, int blocksize)
+int ____bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
-	struct block_device *bdev = sb->s_bdev;
+#if IS_ENABLED(CONFIG_FS_DAX)
 	struct dax_device *dax_dev;
 	pgoff_t pgoff;
 	struct request_queue *q;
@@ -94,8 +94,8 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 	char buf[BDEVNAME_SIZE];
 
 	if (blocksize != PAGE_SIZE) {
-		pr_err("VFS (%s): error: unsupported blocksize for dax\n",
-				sb->s_id);
+		pr_debug("%s: error: unsupported blocksize for dax\n",
+				bdevname(bdev, buf));
 		return -EINVAL;
 	}
 
@@ -108,15 +108,15 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 
 	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
 	if (err) {
-		pr_err("VFS (%s): error: unaligned partition for dax\n",
-				sb->s_id);
+		pr_debug("%s: error: unaligned partition for dax\n",
+				bdevname(bdev, buf));
 		return err;
 	}
 
 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 	if (!dax_dev) {
-		pr_err("VFS (%s): error: device does not support dax\n",
-				sb->s_id);
+		pr_err("%s: error: device does not support dax\n",
+				bdevname(bdev, buf));
 		return -EOPNOTSUPP;
 	}
 
@@ -127,12 +127,21 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 	put_dax(dax_dev);
 
 	if (len < 1) {
-		pr_err("VFS (%s): error: dax access failed (%ld)",
-				sb->s_id, len);
+		pr_debug("%s: error: dax access failed (%ld)\n",
+				bdevname(bdev, buf), len);
 		return len < 0 ? len : -EIO;
 	}
 
 	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+EXPORT_SYMBOL_GPL(____bdev_dax_supported);
+
+int __bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+	return ____bdev_dax_supported(sb->s_bdev, blocksize);
 }
 EXPORT_SYMBOL_GPL(__bdev_dax_supported);
 #endif
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 95643f44e818..83b9362be09c 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -201,7 +201,7 @@ config BLK_DEV_DM_BUILTIN
 config BLK_DEV_DM
 	tristate "Device mapper support"
 	select BLK_DEV_DM_BUILTIN
-	depends on DAX || DAX=n
+	select DAX
 	---help---
 	  Device-mapper is a low level volume manager.  It works by allowing
 	  people to specify mappings for ranges of logical sectors.  Various
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 89443e0ededa..d5f8eff7c11d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -154,7 +154,6 @@ static int linear_iterate_devices(struct dm_target *ti,
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {
@@ -185,11 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
-#else
-#define linear_dax_direct_access NULL
-#define linear_dax_copy_from_iter NULL
-#endif
-
 static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 4, 0},
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 43cd92835efb..5d8cedc5311c 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -611,6 +611,51 @@ static int log_mark(struct log_writes_c *lc, char *data)
 	return 0;
 }
 
+static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
+		   struct iov_iter *i)
+{
+	struct pending_block *block;
+
+	if (!bytes)
+		return 0;
+
+	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+	if (!block) {
+		DMERR("Error allocating dax pending block");
+		return -ENOMEM;
+	}
+
+	block->data = kzalloc(bytes, GFP_KERNEL);
+	if (!block->data) {
+		DMERR("Error allocating dax data space");
+		kfree(block);
+		return -ENOMEM;
+	}
+
+	/* write data provided via the iterator */
+	if (!copy_from_iter(block->data, bytes, i)) {
+		DMERR("Error copying dax data");
+		kfree(block->data);
+		kfree(block);
+		return -EIO;
+	}
+
+	/* rewind the iterator so that the block driver can use it */
+	iov_iter_revert(i, bytes);
+
+	block->datalen = bytes;
+	block->sector = bio_to_dev_sectors(lc, sector);
+	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
+
+	atomic_inc(&lc->pending_blocks);
+	spin_lock_irq(&lc->blocks_lock);
+	list_add_tail(&block->list, &lc->unflushed_blocks);
+	spin_unlock_irq(&lc->blocks_lock);
+	wake_up_process(lc->log_kthread);
+
+	return 0;
+}
+
 static void log_writes_dtr(struct dm_target *ti)
 {
 	struct log_writes_c *lc = ti->private;
@@ -879,52 +924,6 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
 	limits->io_min = limits->physical_block_size;
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
-		   struct iov_iter *i)
-{
-	struct pending_block *block;
-
-	if (!bytes)
-		return 0;
-
-	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
-	if (!block) {
-		DMERR("Error allocating dax pending block");
-		return -ENOMEM;
-	}
-
-	block->data = kzalloc(bytes, GFP_KERNEL);
-	if (!block->data) {
-		DMERR("Error allocating dax data space");
-		kfree(block);
-		return -ENOMEM;
-	}
-
-	/* write data provided via the iterator */
-	if (!copy_from_iter(block->data, bytes, i)) {
-		DMERR("Error copying dax data");
-		kfree(block->data);
-		kfree(block);
-		return -EIO;
-	}
-
-	/* rewind the iterator so that the block driver can use it */
-	iov_iter_revert(i, bytes);
-
-	block->datalen = bytes;
-	block->sector = bio_to_dev_sectors(lc, sector);
-	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
-	atomic_inc(&lc->pending_blocks);
-	spin_lock_irq(&lc->blocks_lock);
-	list_add_tail(&block->list, &lc->unflushed_blocks);
-	spin_unlock_irq(&lc->blocks_lock);
-	wake_up_process(lc->log_kthread);
-
-	return 0;
-}
-
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 					 long nr_pages, void **kaddr, pfn_t *pfn)
 {
@@ -961,10 +960,6 @@ static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
 dax_copy:
 	return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
 }
-#else
-#define log_writes_dax_direct_access NULL
-#define log_writes_dax_copy_from_iter NULL
-#endif
 
 static struct target_type log_writes_target = {
 	.name   = "log-writes",
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 24e300736d49..cd209e8902a6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -311,7 +311,6 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {
@@ -352,11 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
-#else
-#define stripe_dax_direct_access NULL
-#define stripe_dax_copy_from_iter NULL
-#endif
-
 /*
  * Stripe status:
  *
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ede4c0a043e4..a457aa231b44 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -889,9 +889,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 			       sector_t start, sector_t len, void *data)
 {
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && blk_queue_dax(q);
+	return ____bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1830,6 +1828,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
 	if (dm_table_supports_dax(t))
 		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);
+
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e604631a428..758c2271431a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -984,8 +984,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	if (len < 1)
 		goto out;
 	nr_pages = min(len, nr_pages);
-	if (ti->type->direct_access)
-		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
 	dm_put_live_table(md, srcu_idx);
@@ -1704,7 +1703,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 static struct mapped_device *alloc_dev(int minor)
 {
 	int r, numa_node_id = dm_get_numa_node();
-	struct dax_device *dax_dev = NULL;
+	struct dax_device *dax_dev;
 	struct mapped_device *md;
 	void *old_md;
 
@@ -1770,11 +1769,9 @@ static struct mapped_device *alloc_dev(int minor)
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);
 
-	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
-		dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
-		if (!dax_dev)
-			goto bad;
-	}
+	dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+	if (!dax_dev)
+		goto bad;
 	md->dax_dev = dax_dev;
 
 	add_disk(md->disk);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 96f0f5d7a318..401d0060d1e1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	return;
 
 failure:
-	dev_info(dev, "replenish pools failure\n");
+	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
 	pool->free_map[pool->next_free] = index;
 	pool->rx_buff[index].skb = NULL;
 
@@ -717,23 +718,6 @@ static int init_tx_pools(struct net_device *netdev)
 	return 0;
 }
 
-static void release_error_buffers(struct ibmvnic_adapter *adapter)
-{
-	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_error_buff *error_buff, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->error_list_lock, flags);
-	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
-		list_del(&error_buff->list);
-		dma_unmap_single(dev, error_buff->dma, error_buff->len,
-				 DMA_FROM_DEVICE);
-		kfree(error_buff->buff);
-		kfree(error_buff);
-	}
-	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-}
-
 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
 {
 	int i;
@@ -895,7 +879,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
 
 	release_tx_pools(adapter);
 	release_rx_pools(adapter);
-	release_error_buffers(adapter);
 
 	release_napi(adapter);
 	release_login_rsp_buffer(adapter);
 }
@@ -1617,7 +1600,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 					       &tx_crq);
 	}
 	if (lpar_rc != H_SUCCESS) {
-		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+			dev_err_ratelimited(dev, "tx: send failed\n");
 		dev_kfree_skb_any(skb);
 		tx_buff->skb = NULL;
 
@@ -3204,6 +3188,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
 	return crq;
 }
 
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+	switch (rc) {
+	case H_PARAMETER:
+		dev_warn_ratelimited(dev,
+				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+				     func, rc);
+		break;
+	case H_CLOSED:
+		dev_warn_ratelimited(dev,
+				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+				     func, rc);
+		break;
+	default:
+		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+		break;
+	}
+}
+
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 		       union sub_crq *sub_crq)
 {
@@ -3230,11 +3233,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 				cpu_to_be64(u64_crq[2]),
 				cpu_to_be64(u64_crq[3]));
 
-	if (rc) {
-		if (rc == H_CLOSED)
-			dev_warn(dev, "CRQ Queue closed\n");
-		dev_err(dev, "Send error (rc=%d)\n", rc);
-	}
+	if (rc)
+		print_subcrq_error(dev, rc, __func__);
 
 	return rc;
 }
@@ -3252,11 +3252,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
 				cpu_to_be64(remote_handle),
 				ioba, num_entries);
 
-	if (rc) {
-		if (rc == H_CLOSED)
-			dev_warn(dev, "CRQ Queue closed\n");
-		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
-	}
+	if (rc)
+		print_subcrq_error(dev, rc, __func__);
 
 	return rc;
 }
@@ -3828,132 +3825,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 	ibmvnic_send_crq(adapter, &crq);
 }
 
-static void handle_error_info_rsp(union ibmvnic_crq *crq,
-				  struct ibmvnic_adapter *adapter)
-{
-	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_error_buff *error_buff, *tmp;
-	unsigned long flags;
-	bool found = false;
-	int i;
-
-	if (!crq->request_error_rsp.rc.code) {
-		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
-			 crq->request_error_rsp.rc.code);
-		return;
-	}
-
-	spin_lock_irqsave(&adapter->error_list_lock, flags);
-	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
-		if (error_buff->error_id == crq->request_error_rsp.error_id) {
-			found = true;
-			list_del(&error_buff->list);
-			break;
-		}
-	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
-	if (!found) {
-		dev_err(dev, "Couldn't find error id %x\n",
-			be32_to_cpu(crq->request_error_rsp.error_id));
-		return;
-	}
-
-	dev_err(dev, "Detailed info for error id %x:",
-		be32_to_cpu(crq->request_error_rsp.error_id));
-
-	for (i = 0; i < error_buff->len; i++) {
-		pr_cont("%02x", (int)error_buff->buff[i]);
-		if (i % 8 == 7)
-			pr_cont(" ");
-	}
-	pr_cont("\n");
-
-	dma_unmap_single(dev, error_buff->dma, error_buff->len,
-			 DMA_FROM_DEVICE);
-	kfree(error_buff->buff);
-	kfree(error_buff);
-}
-
-static void request_error_information(struct ibmvnic_adapter *adapter,
-				      union ibmvnic_crq *err_crq)
-{
-	struct device *dev = &adapter->vdev->dev;
-	struct net_device *netdev = adapter->netdev;
-	struct ibmvnic_error_buff *error_buff;
-	unsigned long timeout = msecs_to_jiffies(30000);
-	union ibmvnic_crq crq;
-	unsigned long flags;
-	int rc, detail_len;
-
-	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
-	if (!error_buff)
-		return;
-
-	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
-	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
-	if (!error_buff->buff) {
-		kfree(error_buff);
-		return;
-	}
-
-	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
-					 DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, error_buff->dma)) {
-		netdev_err(netdev, "Couldn't map error buffer\n");
-		kfree(error_buff->buff);
-		kfree(error_buff);
-		return;
-	}
-
-	error_buff->len = detail_len;
-	error_buff->error_id = err_crq->error_indication.error_id;
-
-	spin_lock_irqsave(&adapter->error_list_lock, flags);
-	list_add_tail(&error_buff->list, &adapter->errors);
-	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
-	memset(&crq, 0, sizeof(crq));
-	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
-	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
-	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
-	crq.request_error_info.len = cpu_to_be32(detail_len);
-	crq.request_error_info.error_id = err_crq->error_indication.error_id;
-
-	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc) {
-		netdev_err(netdev, "failed to request error information\n");
-		goto err_info_fail;
-	}
-
-	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
-		netdev_err(netdev, "timeout waiting for error information\n");
-		goto err_info_fail;
+static const char *ibmvnic_fw_err_cause(u16 cause)
+{
+	switch (cause) {
+	case ADAPTER_PROBLEM:
+		return "adapter problem";
+	case BUS_PROBLEM:
+		return "bus problem";
+	case FW_PROBLEM:
+		return "firmware problem";
+	case DD_PROBLEM:
+		return "device driver problem";
+	case EEH_RECOVERY:
+		return "EEH recovery";
+	case FW_UPDATED:
+		return "firmware updated";
+	case LOW_MEMORY:
+		return "low Memory";
+	default:
+		return "unknown";
 	}
-
-	return;
-
-err_info_fail:
-	spin_lock_irqsave(&adapter->error_list_lock, flags);
-	list_del(&error_buff->list);
-	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
-	kfree(error_buff->buff);
-	kfree(error_buff);
 }
 
 static void handle_error_indication(union ibmvnic_crq *crq,
 				    struct ibmvnic_adapter *adapter)
 {
 	struct device *dev = &adapter->vdev->dev;
+	u16 cause;
 
-	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
-		crq->error_indication.flags
-			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
-		be32_to_cpu(crq->error_indication.error_id),
-		be16_to_cpu(crq->error_indication.error_cause));
+	cause = be16_to_cpu(crq->error_indication.error_cause);
 
-	if (be32_to_cpu(crq->error_indication.error_id))
-		request_error_information(adapter, crq);
+	dev_warn_ratelimited(dev,
+			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
+			     crq->error_indication.flags
+				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+			     ibmvnic_fw_err_cause(cause));
 
 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
@@ -4453,10 +4359,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		netdev_dbg(netdev, "Got Error Indication\n");
 		handle_error_indication(crq, adapter);
 		break;
-	case REQUEST_ERROR_RSP:
-		netdev_dbg(netdev, "Got Error Detail Response\n");
-		handle_error_info_rsp(crq, adapter);
-		break;
 	case REQUEST_STATISTICS_RSP:
 		netdev_dbg(netdev, "Got Statistics Response\n");
 		complete(&adapter->stats_done);
@@ -4815,9 +4717,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	spin_lock_init(&adapter->stats_lock);
 
-	INIT_LIST_HEAD(&adapter->errors);
-	spin_lock_init(&adapter->error_list_lock);
-
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
 	mutex_init(&adapter->reset_lock);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f9fb780102ac..f06eec145ca6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -512,24 +512,6 @@ struct ibmvnic_error_indication {
 	u8 reserved2[2];
 } __packed __aligned(8);
 
-struct ibmvnic_request_error_info {
-	u8 first;
-	u8 cmd;
-	u8 reserved[2];
-	__be32 ioba;
-	__be32 len;
-	__be32 error_id;
-} __packed __aligned(8);
-
-struct ibmvnic_request_error_rsp {
-	u8 first;
-	u8 cmd;
-	u8 reserved[2];
-	__be32 error_id;
-	__be32 len;
-	struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
 struct ibmvnic_link_state_indication {
 	u8 first;
 	u8 cmd;
@@ -709,8 +691,6 @@ union ibmvnic_crq {
 	struct ibmvnic_request_debug_stats request_debug_stats;
 	struct ibmvnic_request_debug_stats request_debug_stats_rsp;
 	struct ibmvnic_error_indication error_indication;
-	struct ibmvnic_request_error_info request_error_info;
-	struct ibmvnic_request_error_rsp request_error_rsp;
 	struct ibmvnic_link_state_indication link_state_indication;
 	struct ibmvnic_change_mac_addr change_mac_addr;
 	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
@@ -809,8 +789,6 @@ enum ibmvnic_commands {
 	SET_PHYS_PARMS = 0x07,
 	SET_PHYS_PARMS_RSP = 0x87,
 	ERROR_INDICATION = 0x08,
-	REQUEST_ERROR_INFO = 0x09,
-	REQUEST_ERROR_RSP = 0x89,
 	LOGICAL_LINK_STATE = 0x0C,
 	LOGICAL_LINK_STATE_RSP = 0x8C,
 	REQUEST_STATISTICS = 0x0D,
@@ -945,14 +923,6 @@ struct ibmvnic_rx_pool {
 	struct ibmvnic_long_term_buff long_term_buff;
 };
 
-struct ibmvnic_error_buff {
-	char *buff;
-	dma_addr_t dma;
-	int len;
-	struct list_head list;
-	__be32 error_id;
-};
-
 struct ibmvnic_vpd {
 	unsigned char *buff;
 	dma_addr_t dma_addr;
@@ -1047,9 +1017,6 @@ struct ibmvnic_adapter {
 	struct completion init_done;
 	int init_done_rc;
 
-	struct list_head errors;
-	spinlock_t error_list_lock;
-
 	struct completion fw_done;
 	int fw_done_rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 90eca9ed304d..c79c2ac0f379 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -565,14 +565,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
 {
 	struct device *dev = disk_to_dev(disk)->parent;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
-	const char *pol = nd_region->ro ? "only" : "write";
+	int disk_ro = get_disk_ro(disk);
 
-	if (nd_region->ro == get_disk_ro(disk))
+	/*
+	 * Upgrade to read-only if the region is read-only preserve as
+	 * read-only if the disk is already read-only.
+	 */
+	if (disk_ro || nd_region->ro == disk_ro)
 		return 0;
 
-	dev_info(dev, "%s read-%s, marking %s read-%s\n",
-			dev_name(&nd_region->dev), pol, disk->disk_name, pol);
-	set_disk_ro(disk, nd_region->ro);
+	dev_info(dev, "%s read-only, marking %s read-only\n",
+			dev_name(&nd_region->dev), disk->disk_name);
+	set_disk_ro(disk, 1);
 
 	return 0;
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index f8913b8124b6..233907889f96 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
 	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
 	nd_label_copy(ndd, to_next_namespace_index(ndd),
 			to_current_namespace_index(ndd));
-	rc = nd_label_reserve_dpa(ndd);
-	if (ndd->ns_current >= 0)
-		nvdimm_set_aliasing(dev);
+	if (ndd->ns_current >= 0) {
+		rc = nd_label_reserve_dpa(ndd);
+		if (rc == 0)
+			nvdimm_set_aliasing(dev);
+	}
 	nvdimm_clear_locked(dev);
 	nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index faf2834bd7d1..e80200b919ac 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -365,7 +365,8 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+	if (pmem->pfn_flags & PFN_MAP)
+		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
 
 	disk = alloc_disk_node(0, nid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 21a8a61684d3..aea49b5fd9d1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -673,10 +673,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	}
 
 	cmd->common.command_id = req->tag;
-	if (ns)
-		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
-	else
-		trace_nvme_setup_admin_cmd(cmd);
+	trace_nvme_setup_cmd(req, cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 3dc0980d7435..34f363dc7de8 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -715,6 +715,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 					num_online_cpus(), token);
 			break;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index c4c20ee6bf1e..e5435187dfb4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -24,7 +24,7 @@ MODULE_PARM_DESC(multipath,
 
 inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
-	return multipath && (ctrl->subsys->cmic & (1 << 3));
+	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
 }
 
 /*
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5856616a5abc..408cb8bd7de7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -118,6 +118,13 @@ static inline struct nvme_request *nvme_req(struct request *req)
 	return blk_mq_rq_to_pdu(req);
 }
 
+static inline u16 nvme_req_qid(struct request *req)
+{
+	if (!req->rq_disk)
+		return 0;
+	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
+}
+
 /* The below value is the specific amount of delay needed before checking
  * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index ea91fccd1bc0..09844495a86a 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -50,13 +50,8 @@
 	nvme_admin_opcode_name(nvme_admin_security_recv),	\
 	nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
 
-const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
-		u8 *cdw10);
-#define __parse_nvme_admin_cmd(opcode, cdw10) \
-	nvme_trace_parse_admin_cmd(p, opcode, cdw10)
-
 #define nvme_opcode_name(opcode)	{ opcode, #opcode }
-#define show_opcode_name(val)					\
+#define show_nvm_opcode_name(val)				\
 	__print_symbolic(val,					\
 		nvme_opcode_name(nvme_cmd_flush),		\
 		nvme_opcode_name(nvme_cmd_write),		\
@@ -70,83 +65,66 @@
 		nvme_opcode_name(nvme_cmd_resv_acquire),	\
 		nvme_opcode_name(nvme_cmd_resv_release))
 
-const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
-		u8 *cdw10);
-#define __parse_nvme_cmd(opcode, cdw10) \
-	nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+#define show_opcode_name(qid, opcode)					\
+	(qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode))
 
-TRACE_EVENT(nvme_setup_admin_cmd,
-	    TP_PROTO(struct nvme_command *cmd),
-	    TP_ARGS(cmd),
-	    TP_STRUCT__entry(
-		    __field(u8, opcode)
-		    __field(u8, flags)
-		    __field(u16, cid)
-		    __field(u64, metadata)
-		    __array(u8, cdw10, 24)
-	    ),
-	    TP_fast_assign(
-		    __entry->opcode = cmd->common.opcode;
-		    __entry->flags = cmd->common.flags;
-		    __entry->cid = cmd->common.command_id;
-		    __entry->metadata = le64_to_cpu(cmd->common.metadata);
-		    memcpy(__entry->cdw10, cmd->common.cdw10,
-			   sizeof(__entry->cdw10));
-	    ),
-	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->cid, __entry->flags, __entry->metadata,
-		      show_admin_opcode_name(__entry->opcode),
-		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
-);
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+		u8 *cdw10);
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+		u8 *cdw10);
+#define parse_nvme_cmd(qid, opcode, cdw10)			\
+	(qid ?							\
+	 nvme_trace_parse_nvm_cmd(p, opcode, cdw10) :		\
+	 nvme_trace_parse_admin_cmd(p, opcode, cdw10))
 
-TRACE_EVENT(nvme_setup_nvm_cmd,
-	    TP_PROTO(int qid, struct nvme_command *cmd),
-	    TP_ARGS(qid, cmd),
+TRACE_EVENT(nvme_setup_cmd,
+	    TP_PROTO(struct request *req, struct nvme_command *cmd),
+	    TP_ARGS(req, cmd),
 	    TP_STRUCT__entry(
-		    __field(int, qid)
-		    __field(u8, opcode)
-		    __field(u8, flags)
-		    __field(u16, cid)
-		    __field(u32, nsid)
-		    __field(u64, metadata)
-		    __array(u8, cdw10, 24)
+		__field(int, qid)
+		__field(u8, opcode)
+		__field(u8, flags)
+		__field(u16, cid)
+		__field(u32, nsid)
+		__field(u64, metadata)
+		__array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = qid;
-		    __entry->opcode = cmd->common.opcode;
-		    __entry->flags = cmd->common.flags;
-		    __entry->cid = cmd->common.command_id;
-		    __entry->nsid = le32_to_cpu(cmd->common.nsid);
-		    __entry->metadata = le64_to_cpu(cmd->common.metadata);
-		    memcpy(__entry->cdw10, cmd->common.cdw10,
-			   sizeof(__entry->cdw10));
+		__entry->qid = nvme_req_qid(req);
+		__entry->opcode = cmd->common.opcode;
+		__entry->flags = cmd->common.flags;
+		__entry->cid = cmd->common.command_id;
+		__entry->nsid = le32_to_cpu(cmd->common.nsid);
+		__entry->metadata = le64_to_cpu(cmd->common.metadata);
+		memcpy(__entry->cdw10, cmd->common.cdw10,
+		       sizeof(__entry->cdw10));
 	    ),
-	    TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->qid, __entry->nsid, __entry->cid,
+	    TP_printk("qid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->qid, __entry->cid, __entry->nsid,
 		      __entry->flags, __entry->metadata,
-		      show_opcode_name(__entry->opcode),
-		      __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+		      show_opcode_name(__entry->qid, __entry->opcode),
+		      parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10))
 );
 
 TRACE_EVENT(nvme_complete_rq,
 	    TP_PROTO(struct request *req),
 	    TP_ARGS(req),
 	    TP_STRUCT__entry(
-		    __field(int, qid)
-		    __field(int, cid)
-		    __field(u64, result)
-		    __field(u8, retries)
-		    __field(u8, flags)
-		    __field(u16, status)
+		__field(int, qid)
+		__field(int, cid)
+		__field(u64, result)
+		__field(u8, retries)
+		__field(u8, flags)
+		__field(u16, status)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = req->q->id;
-		    __entry->cid = req->tag;
-		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
-		    __entry->retries = nvme_req(req)->retries;
-		    __entry->flags = nvme_req(req)->flags;
-		    __entry->status = nvme_req(req)->status;
+		__entry->qid = nvme_req_qid(req);
+		__entry->cid = req->tag;
+		__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+		__entry->retries = nvme_req(req)->retries;
+		__entry->flags = nvme_req(req)->flags;
+		__entry->status = nvme_req(req)->status;
 	    ),
 	    TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
		      __entry->cid, __entry->qid, __entry->result,
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index aba2af032615..db29dfd54d25 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -80,6 +80,14 @@ config PINCTRL_GEMINILAKE
 	  This pinctrl driver provides an interface that allows configuring
 	  of Intel Gemini Lake SoC pins and using them as GPIOs.
 
+config PINCTRL_LEWISBURG
+	tristate "Intel Lewisburg pinctrl and GPIO driver"
+	depends on ACPI
+	select PINCTRL_INTEL
+	help
+	  This pinctrl driver provides an interface that allows configuring
+	  of Intel Lewisburg pins and using them as GPIOs.
+
 config PINCTRL_SUNRISEPOINT
 	tristate "Intel Sunrisepoint pinctrl and GPIO driver"
 	depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index d9b31f7e2b1b..c12874da5992 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_PINCTRL_BROXTON)		+= pinctrl-broxton.o
 obj-$(CONFIG_PINCTRL_CANNONLAKE)	+= pinctrl-cannonlake.o
 obj-$(CONFIG_PINCTRL_DENVERTON)		+= pinctrl-denverton.o
 obj-$(CONFIG_PINCTRL_GEMINILAKE)	+= pinctrl-geminilake.o
+obj-$(CONFIG_PINCTRL_LEWISBURG)		+= pinctrl-lewisburg.o
 obj-$(CONFIG_PINCTRL_SUNRISEPOINT)	+= pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
new file mode 100644
index 000000000000..14d56ea6cfdc
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -0,0 +1,343 @@
+/*
+ * Intel Lewisburg pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define LBG_PAD_OWN	0x020
+#define LBG_PADCFGLOCK	0x060
+#define LBG_HOSTSW_OWN	0x080
+#define LBG_GPI_IE	0x110
+
+#define LBG_COMMUNITY(b, s, e)				\
+	{						\
+		.barno = (b),				\
+		.padown_offset = LBG_PAD_OWN,		\
+		.padcfglock_offset = LBG_PADCFGLOCK,	\
+		.hostown_offset = LBG_HOSTSW_OWN,	\
+		.ie_offset = LBG_GPI_IE,		\
+		.gpp_size = 24,				\
+		.pin_base = (s),			\
+		.npins = ((e) - (s) + 1),		\
+	}
+
+static const struct pinctrl_pin_desc lbg_pins[] = {
+	/* GPP_A */
+	PINCTRL_PIN(0, "RCINB"),
+	PINCTRL_PIN(1, "LAD_0"),
+	PINCTRL_PIN(2, "LAD_1"),
+	PINCTRL_PIN(3, "LAD_2"),
+	PINCTRL_PIN(4, "LAD_3"),
+	PINCTRL_PIN(5, "LFRAMEB"),
+	PINCTRL_PIN(6, "SERIRQ"),
+	PINCTRL_PIN(7, "PIRQAB"),
+	PINCTRL_PIN(8, "CLKRUNB"),
+	PINCTRL_PIN(9, "CLKOUT_LPC_0"),
+	PINCTRL_PIN(10, "CLKOUT_LPC_1"),
+	PINCTRL_PIN(11, "PMEB"),
+	PINCTRL_PIN(12, "BM_BUSYB"),
+	PINCTRL_PIN(13, "SUSWARNB_SUSPWRDNACK"),
+	PINCTRL_PIN(14, "ESPI_RESETB"),
+	PINCTRL_PIN(15, "SUSACKB"),
+	PINCTRL_PIN(16, "CLKOUT_LPC_2"),
+	PINCTRL_PIN(17, "GPP_A_17"),
+	PINCTRL_PIN(18, "GPP_A_18"),
+	PINCTRL_PIN(19, "GPP_A_19"),
+	PINCTRL_PIN(20, "GPP_A_20"),
+	PINCTRL_PIN(21, "GPP_A_21"),
+	PINCTRL_PIN(22, "GPP_A_22"),
+	PINCTRL_PIN(23, "GPP_A_23"),
+	/* GPP_B */
+	PINCTRL_PIN(24, "CORE_VID_0"),
+	PINCTRL_PIN(25, "CORE_VID_1"),
+	PINCTRL_PIN(26, "VRALERTB"),
+	PINCTRL_PIN(27, "CPU_GP_2"),
+	PINCTRL_PIN(28, "CPU_GP_3"),
+	PINCTRL_PIN(29, "SRCCLKREQB_0"),
+	PINCTRL_PIN(30, "SRCCLKREQB_1"),
+	PINCTRL_PIN(31, "SRCCLKREQB_2"),
+	PINCTRL_PIN(32, "SRCCLKREQB_3"),
+	PINCTRL_PIN(33, "SRCCLKREQB_4"),
+	PINCTRL_PIN(34, "SRCCLKREQB_5"),
+	PINCTRL_PIN(35, "GPP_B_11"),
+	PINCTRL_PIN(36, "GLB_RST_WARN_N"),
+	PINCTRL_PIN(37, "PLTRSTB"),
+	PINCTRL_PIN(38, "SPKR"),
+	PINCTRL_PIN(39, "GPP_B_15"),
+	PINCTRL_PIN(40, "GPP_B_16"),
+	PINCTRL_PIN(41, "GPP_B_17"),
+	PINCTRL_PIN(42, "GPP_B_18"),
+	PINCTRL_PIN(43, "GPP_B_19"),
+	PINCTRL_PIN(44, "GPP_B_20"),
+	PINCTRL_PIN(45, "GPP_B_21"),
+	PINCTRL_PIN(46, "GPP_B_22"),
+	PINCTRL_PIN(47, "SML1ALERTB"),
+	/* GPP_F */
+	PINCTRL_PIN(48, "SATAXPCIE_3"),
+	PINCTRL_PIN(49, "SATAXPCIE_4"),
+	PINCTRL_PIN(50, "SATAXPCIE_5"),
+	PINCTRL_PIN(51, "SATAXPCIE_6"),
+	PINCTRL_PIN(52, "SATAXPCIE_7"),
+	PINCTRL_PIN(53, "SATA_DEVSLP_3"),
+	PINCTRL_PIN(54, "SATA_DEVSLP_4"),
+	PINCTRL_PIN(55, "SATA_DEVSLP_5"),
+	PINCTRL_PIN(56, "SATA_DEVSLP_6"),
+	PINCTRL_PIN(57, "SATA_DEVSLP_7"),
+	PINCTRL_PIN(58, "SATA_SCLOCK"),
+	PINCTRL_PIN(59, "SATA_SLOAD"),
+	PINCTRL_PIN(60, "SATA_SDATAOUT1"),
+	PINCTRL_PIN(61, "SATA_SDATAOUT0"),
+	PINCTRL_PIN(62, "SSATA_LEDB"),
+	PINCTRL_PIN(63, "USB2_OCB_4"),
+	PINCTRL_PIN(64, "USB2_OCB_5"),
+	PINCTRL_PIN(65, "USB2_OCB_6"),
+	PINCTRL_PIN(66, "USB2_OCB_7"),
+	PINCTRL_PIN(67, "GBE_SMBUS_CLK"),
+	PINCTRL_PIN(68, "GBE_SMBDATA"),
+	PINCTRL_PIN(69, "GBE_SMBALRTN"),
+	PINCTRL_PIN(70, "SSATA_SCLOCK"),
+	PINCTRL_PIN(71, "SSATA_SLOAD"),
+	/* GPP_C */
+	PINCTRL_PIN(72, "SMBCLK"),
+	PINCTRL_PIN(73, "SMBDATA"),
+	PINCTRL_PIN(74, "SMBALERTB"),
+	PINCTRL_PIN(75, "SML0CLK"),
+	PINCTRL_PIN(76, "SML0DATA"),
+	PINCTRL_PIN(77, "SML0ALERTB"),
+	PINCTRL_PIN(78, "SML1CLK"),
+	PINCTRL_PIN(79, "SML1DATA"),
+	PINCTRL_PIN(80, "GPP_C_8"),
+	PINCTRL_PIN(81, "GPP_C_9"),
+	PINCTRL_PIN(82, "GPP_C_10"),
+	PINCTRL_PIN(83, "GPP_C_11"),
+	PINCTRL_PIN(84, "GPP_C_12"),
+	PINCTRL_PIN(85, "GPP_C_13"),
+	PINCTRL_PIN(86, "GPP_C_14"),
+	PINCTRL_PIN(87, "GPP_C_15"),
+	PINCTRL_PIN(88, "GPP_C_16"),
+	PINCTRL_PIN(89, "GPP_C_17"),
+	PINCTRL_PIN(90, "GPP_C_18"),
+	PINCTRL_PIN(91, "GPP_C_19"),
+	PINCTRL_PIN(92, "GPP_C_20"),
+	PINCTRL_PIN(93, "GPP_C_21"),
+	PINCTRL_PIN(94, "GPP_C_22"),
+	PINCTRL_PIN(95, "GPP_C_23"),
+	/* GPP_D */
+	PINCTRL_PIN(96, "GPP_D_0"),
+	PINCTRL_PIN(97, "GPP_D_1"),
+	PINCTRL_PIN(98, "GPP_D_2"),
+	PINCTRL_PIN(99, "GPP_D_3"),
+	PINCTRL_PIN(100, "GPP_D_4"),
+	PINCTRL_PIN(101, "SSP0_SFRM"),
+	PINCTRL_PIN(102, "SSP0_TXD"),
+	PINCTRL_PIN(103, "SSP0_RXD"),
+	PINCTRL_PIN(104, "SSP0_SCLK"),
+	PINCTRL_PIN(105, "SSATA_DEVSLP_3"),
+	PINCTRL_PIN(106, "SSATA_DEVSLP_4"),
+	PINCTRL_PIN(107, "SSATA_DEVSLP_5"),
+	PINCTRL_PIN(108, "SSATA_SDATAOUT1"),
+	PINCTRL_PIN(109, "SML0BCLK_SML0BCLKIE"),
+	PINCTRL_PIN(110, "SML0BDATA_SML0BDATAIE"),
+	PINCTRL_PIN(111, "SSATA_SDATAOUT0"),
+	PINCTRL_PIN(112, "SML0BALERTB_SML0BALERTBIE"),
+	PINCTRL_PIN(113, "DMIC_CLK_1"),
+	PINCTRL_PIN(114, "DMIC_DATA_1"),
+	PINCTRL_PIN(115, "DMIC_CLK_0"),
+	PINCTRL_PIN(116, "DMIC_DATA_0"),
+	PINCTRL_PIN(117, "IE_UART_RXD"),
+	PINCTRL_PIN(118, "IE_UART_TXD"),
+	PINCTRL_PIN(119, "GPP_D_23"),
+	/* GPP_E */
+	PINCTRL_PIN(120, "SATAXPCIE_0"),
+	PINCTRL_PIN(121, "SATAXPCIE_1"),
+	PINCTRL_PIN(122, "SATAXPCIE_2"),
+	PINCTRL_PIN(123, "CPU_GP_0"),
+	PINCTRL_PIN(124, "SATA_DEVSLP_0"),
+	PINCTRL_PIN(125, "SATA_DEVSLP_1"),
+	PINCTRL_PIN(126, "SATA_DEVSLP_2"),
+	PINCTRL_PIN(127, "CPU_GP_1"),
+	PINCTRL_PIN(128, "SATA_LEDB"),
+	PINCTRL_PIN(129, "USB2_OCB_0"),
+	PINCTRL_PIN(130, "USB2_OCB_1"),
+	PINCTRL_PIN(131, "USB2_OCB_2"),
+	PINCTRL_PIN(132, "USB2_OCB_3"),
+	/* GPP_I */
+	PINCTRL_PIN(133, "GBE_TDO"),
+	PINCTRL_PIN(134, "GBE_TCK"),
+	PINCTRL_PIN(135, "GBE_TMS"),
+	PINCTRL_PIN(136, "GBE_TDI"),
+	PINCTRL_PIN(137, "DO_RESET_INB"),
+	PINCTRL_PIN(138, "DO_RESET_OUTB"),
+	PINCTRL_PIN(139, "RESET_DONE"),
+	PINCTRL_PIN(140, "GBE_TRST_N"),
+	PINCTRL_PIN(141, "GBE_PCI_DIS"),
+	PINCTRL_PIN(142, "GBE_LAN_DIS"),
+	PINCTRL_PIN(143, "GPP_I_10"),
+	PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
+	/* GPP_J */
+	PINCTRL_PIN(145, "GBE_LED_0_0"),
+	PINCTRL_PIN(146, "GBE_LED_0_1"),
+	PINCTRL_PIN(147, "GBE_LED_1_0"),
+	PINCTRL_PIN(148, "GBE_LED_1_1"),
+	PINCTRL_PIN(149, "GBE_LED_2_0"),
+	PINCTRL_PIN(150, "GBE_LED_2_1"),
+	PINCTRL_PIN(151, "GBE_LED_3_0"),
+	PINCTRL_PIN(152, "GBE_LED_3_1"),
+	PINCTRL_PIN(153, "GBE_SCL_0"),
+	PINCTRL_PIN(154, "GBE_SDA_0"),
+	PINCTRL_PIN(155, "GBE_SCL_1"),
+	PINCTRL_PIN(156, "GBE_SDA_1"),
+	PINCTRL_PIN(157, "GBE_SCL_2"),
+	PINCTRL_PIN(158, "GBE_SDA_2"),
+	PINCTRL_PIN(159, "GBE_SCL_3"),
+	PINCTRL_PIN(160, "GBE_SDA_3"),
+	PINCTRL_PIN(161, "GBE_SDP_0_0"),
+	PINCTRL_PIN(162, "GBE_SDP_0_1"),
+	PINCTRL_PIN(163, "GBE_SDP_1_0"),
+	PINCTRL_PIN(164, "GBE_SDP_1_1"),
+	PINCTRL_PIN(165, "GBE_SDP_2_0"),
+	PINCTRL_PIN(166, "GBE_SDP_2_1"),
+	PINCTRL_PIN(167, "GBE_SDP_3_0"),
+	PINCTRL_PIN(168, "GBE_SDP_3_1"),
+	/* GPP_K */
+	PINCTRL_PIN(169, "GBE_RMIICLK"),
+	PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
+	PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
+	PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
+	PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
+	PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
+	PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
+	PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
+	PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
+	PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
+	PINCTRL_PIN(179, "PE_RST_N"),
+	PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
+	/* GPP_G */
+	PINCTRL_PIN(181, "FAN_TACH_0"),
+	PINCTRL_PIN(182, "FAN_TACH_1"),
+	PINCTRL_PIN(183, "FAN_TACH_2"),
+	PINCTRL_PIN(184, "FAN_TACH_3"),
+	PINCTRL_PIN(185, "FAN_TACH_4"),
+	PINCTRL_PIN(186, "FAN_TACH_5"),
+	PINCTRL_PIN(187, "FAN_TACH_6"),
+	PINCTRL_PIN(188, "FAN_TACH_7"),
+	PINCTRL_PIN(189, "FAN_PWM_0"),
+	PINCTRL_PIN(190, "FAN_PWM_1"),
+	PINCTRL_PIN(191, "FAN_PWM_2"),
+	PINCTRL_PIN(192, "FAN_PWM_3"),
+	PINCTRL_PIN(193, "GSXDOUT"),
+	PINCTRL_PIN(194, "GSXSLOAD"),
+	PINCTRL_PIN(195, "GSXDIN"),
+	PINCTRL_PIN(196, "GSXSRESETB"),
+	PINCTRL_PIN(197, "GSXCLK"),
+	PINCTRL_PIN(198, "ADR_COMPLETE"),
+	PINCTRL_PIN(199, "NMIB"),
+	PINCTRL_PIN(200, "SMIB"),
+	PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
+	PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
+	PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
+	PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
+	/* GPP_H */
+	PINCTRL_PIN(205, "SRCCLKREQB_6"),
+	PINCTRL_PIN(206, "SRCCLKREQB_7"),
+	PINCTRL_PIN(207, "SRCCLKREQB_8"),
+	PINCTRL_PIN(208, "SRCCLKREQB_9"),
+	PINCTRL_PIN(209, "SRCCLKREQB_10"),
+	PINCTRL_PIN(210, "SRCCLKREQB_11"),
+	PINCTRL_PIN(211, "SRCCLKREQB_12"),
+	PINCTRL_PIN(212, "SRCCLKREQB_13"),
+	PINCTRL_PIN(213, "SRCCLKREQB_14"),
+	PINCTRL_PIN(214, "SRCCLKREQB_15"),
+	PINCTRL_PIN(215, "SML2CLK"),
+	PINCTRL_PIN(216, "SML2DATA"),
+	PINCTRL_PIN(217, "SML2ALERTB"),
+	PINCTRL_PIN(218, "SML3CLK"),
+	PINCTRL_PIN(219, "SML3DATA"),
+	PINCTRL_PIN(220, "SML3ALERTB"),
+	PINCTRL_PIN(221, "SML4CLK"),
+	PINCTRL_PIN(222, "SML4DATA"),
+	PINCTRL_PIN(223, "SML4ALERTB"),
+	PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
+	PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
+	PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
+	PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
+	PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
+	/* GPP_L */
+	PINCTRL_PIN(229, "VISA2CH0_D0"),
+	PINCTRL_PIN(230, "VISA2CH0_D1"),
+	PINCTRL_PIN(231, "VISA2CH0_D2"),
+	PINCTRL_PIN(232, "VISA2CH0_D3"),
+	PINCTRL_PIN(233, "VISA2CH0_D4"),
+	PINCTRL_PIN(234, "VISA2CH0_D5"),
+	PINCTRL_PIN(235, "VISA2CH0_D6"),
+	PINCTRL_PIN(236, "VISA2CH0_D7"),
+	PINCTRL_PIN(237, "VISA2CH0_CLK"),
+	PINCTRL_PIN(238, "VISA2CH1_D0"),
+	PINCTRL_PIN(239, "VISA2CH1_D1"),
+	PINCTRL_PIN(240, "VISA2CH1_D2"),
+	PINCTRL_PIN(241, "VISA2CH1_D3"),
+	PINCTRL_PIN(242, "VISA2CH1_D4"),
+	PINCTRL_PIN(243, "VISA2CH1_D5"),
+	PINCTRL_PIN(244, "VISA2CH1_D6"),
+	PINCTRL_PIN(245, "VISA2CH1_D7"),
+	PINCTRL_PIN(246, "VISA2CH1_CLK"),
+};
+
+static const struct intel_community lbg_communities[] = {
+	LBG_COMMUNITY(0, 0, 71),
+	LBG_COMMUNITY(1, 72, 132),
+	LBG_COMMUNITY(3, 133, 144),
+	LBG_COMMUNITY(4, 145, 180),
+	LBG_COMMUNITY(5, 181, 246),
+};
+
+static const struct intel_pinctrl_soc_data lbg_soc_data = {
+	.pins = lbg_pins,
+	.npins = ARRAY_SIZE(lbg_pins),
+	.communities = lbg_communities,
+	.ncommunities = ARRAY_SIZE(lbg_communities),
+};
+
+static int lbg_pinctrl_probe(struct platform_device *pdev)
+{
+	return intel_pinctrl_probe(pdev, &lbg_soc_data);
+}
+
+static const struct dev_pm_ops lbg_pinctrl_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+				     intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id lbg_pinctrl_acpi_match[] = {
+	{ "INT3536" },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, lbg_pinctrl_acpi_match);
+
+static struct platform_driver lbg_pinctrl_driver = {
+	.probe = lbg_pinctrl_probe,
+	.driver = {
+		.name = "lewisburg-pinctrl",
+		.acpi_match_table = lbg_pinctrl_acpi_match,
+		.pm = &lbg_pinctrl_pm_ops,
+	},
+};
+
+module_platform_driver(lbg_pinctrl_driver);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Lewisburg pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index d42f29a5eb65..57183fce70fb 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,6 +1,6 @@
 config SCSI_HISI_SAS
 	tristate "HiSilicon SAS"
-	depends on HAS_DMA && HAS_IOMEM
+	depends on HAS_IOMEM
 	depends on ARM64 || COMPILE_TEST
 	select SCSI_SAS_LIBSAS
 	select BLK_DEV_INTEGRITY
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 69e714822fc0..5999eaf131e6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,19 +99,52 @@ struct hisi_sas_hw_error {
 	const struct hisi_sas_hw_error *sub;
 };
 
+struct hisi_sas_rst {
+	struct hisi_hba *hisi_hba;
+	struct completion *completion;
+	struct work_struct work;
+	bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+	{	.hisi_hba = hisi_hba, \
+		.completion = &c, \
+		.work = __WORK_INITIALIZER(r.work, \
+				hisi_sas_sync_rst_work_handler), \
+		.done = false, \
+	}
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+	DECLARE_COMPLETION_ONSTACK(c); \
+	DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+	struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+	HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+	HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+	HISI_PHYE_PHY_UP = 0U,
+	HISI_PHYE_LINK_RESET,
+	HISI_PHYES_NUM,
+};
+
 struct hisi_sas_phy {
+	struct work_struct	works[HISI_PHYES_NUM];
 	struct hisi_hba	*hisi_hba;
 	struct hisi_sas_port	*port;
 	struct asd_sas_phy	sas_phy;
 	struct sas_identify	identify;
 	struct timer_list	timer;
-	struct work_struct	phyup_ws;
+	struct completion *reset_completion;
+	spinlock_t lock;
 	u64		port_id; /* from hw */
-	u64		dev_sas_addr;
 	u64		frame_rcvd_size;
 	u8		frame_rcvd[32];
 	u8		phy_attached;
-	u8		reserved[3];
+	u8		in_reset;
+	u8		reserved[2];
 	u32		phy_type;
 	enum sas_linkrate	minimum_linkrate;
 	enum sas_linkrate	maximum_linkrate;
@@ -132,7 +165,7 @@ struct hisi_sas_cq {
 
 struct hisi_sas_dq {
 	struct hisi_hba *hisi_hba;
-	struct hisi_sas_slot	*slot_prep;
+	struct list_head list;
 	spinlock_t lock;
 	int	wr_point;
 	int	id;
@@ -144,16 +177,22 @@ struct hisi_sas_device {
 	struct completion *completion;
 	struct hisi_sas_dq	*dq;
 	struct list_head	list;
-	u64 attached_phy;
-	atomic64_t running_req;
 	enum sas_device_type	dev_type;
 	int device_id;
 	int sata_idx;
 	u8 dev_status;
 };
 
+struct hisi_sas_tmf_task {
+	int force_phy;
+	int phy_id;
+	u8 tmf;
+	u16 tag_of_task_to_be_managed;
+};
+
 struct hisi_sas_slot {
 	struct list_head entry;
+	struct list_head delivery;
struct sas_task *task; struct hisi_sas_port *port; u64 n_elem; @@ -163,17 +202,15 @@ struct hisi_sas_slot { int cmplt_queue_slot; int idx; int abort; + int ready; void *buf; dma_addr_t buf_dma; void *cmd_hdr; dma_addr_t cmd_hdr_dma; struct work_struct abort_slot; struct timer_list internal_abort_timer; -}; - -struct hisi_sas_tmf_task { - u8 tmf; - u16 tag_of_task_to_be_managed; + bool is_internal; + struct hisi_sas_tmf_task *tmf; }; struct hisi_sas_hw { @@ -186,14 +223,13 @@ struct hisi_sas_hw { void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); void (*start_delivery)(struct hisi_sas_dq *dq); - int (*prep_ssp)(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int is_tmf, - struct hisi_sas_tmf_task *tmf); - int (*prep_smp)(struct hisi_hba *hisi_hba, + void (*prep_ssp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); - int (*prep_stp)(struct hisi_hba *hisi_hba, + void (*prep_smp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); - int (*prep_abort)(struct hisi_hba *hisi_hba, + void (*prep_stp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + void (*prep_abort)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort); int (*slot_complete)(struct hisi_hba *hisi_hba, @@ -206,8 +242,9 @@ struct hisi_sas_hw { void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *linkrates); enum sas_linkrate (*phy_get_max_linkrate)(void); - void (*free_device)(struct hisi_hba *hisi_hba, + void (*clear_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *dev); + void (*free_device)(struct hisi_sas_device *sas_dev); int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); void (*dereg_device)(struct hisi_hba *hisi_hba, struct domain_device *device); @@ -215,8 +252,11 @@ struct hisi_sas_hw { u32 (*get_phys_state)(struct hisi_hba *hisi_hba); int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data); + void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms); int max_command_entries; int complete_hdr_size; + struct scsi_host_template *sht; }; struct hisi_hba { @@ -243,6 +283,8 @@ struct hisi_hba { struct workqueue_struct *wq; int slot_index_count; + int last_slot_index; + int last_dev_id; unsigned long *slot_index_tags; unsigned long reject_stp_links_msk; @@ -381,7 +423,7 @@ struct hisi_sas_command_table_ssp { union { struct { struct ssp_command_iu task; - u32 prot[6]; + u32 prot[7]; }; struct ssp_tmf_iu ssp_task; struct xfer_rdy_iu xfer_rdy; @@ -407,13 +449,11 @@ struct hisi_sas_slot_buf_table { }; extern struct scsi_transport_template *hisi_sas_stt; -extern struct scsi_host_template *hisi_sas_sht; - extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); -extern void hisi_sas_init_add(struct hisi_hba *hisi_hba); extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); extern void hisi_sas_free(struct hisi_hba *hisi_hba); -extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); +extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, + int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); @@ -423,11 +463,21 @@ extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern int hisi_sas_remove(struct platform_device *pdev); +extern int 
hisi_sas_slave_configure(struct scsi_device *sdev); +extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); +extern void hisi_sas_scan_start(struct Scsi_Host *shost); +extern struct device_attribute *host_attrs[]; +extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type); extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy); extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot); extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); extern void hisi_sas_rst_work_handler(struct work_struct *work); +extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba); +extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, + enum hisi_sas_phy_event event); +extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); +extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 14bdc72bcd25..7308464503dc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -22,16 +22,21 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct domain_device *device, int abort_flag, int tag); static int hisi_sas_softreset_ata_disk(struct domain_device *device); +static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +static void hisi_sas_release_task(struct hisi_hba *hisi_hba, + struct domain_device *device); +static void hisi_sas_dev_gone(struct domain_device *device); -u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) +u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) { - switch (cmd) { + switch (fis->command) { case ATA_CMD_FPDMA_WRITE: case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_RECV: case ATA_CMD_FPDMA_SEND: case ATA_CMD_NCQ_NON_DATA: - return HISI_SAS_SATA_PROTOCOL_FPDMA; + return HISI_SAS_SATA_PROTOCOL_FPDMA; case ATA_CMD_DOWNLOAD_MICRO: case ATA_CMD_ID_ATA: @@ -43,7 +48,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) case ATA_CMD_WRITE_LOG_EXT: case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: - return HISI_SAS_SATA_PROTOCOL_PIO; + return HISI_SAS_SATA_PROTOCOL_PIO; case ATA_CMD_DSM: case ATA_CMD_DOWNLOAD_MICRO_DMA: @@ -62,7 +67,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) case ATA_CMD_WRITE_LOG_DMA_EXT: case ATA_CMD_WRITE_STREAM_DMA_EXT: case ATA_CMD_ZAC_MGMT_IN: - return HISI_SAS_SATA_PROTOCOL_DMA; + return HISI_SAS_SATA_PROTOCOL_DMA; case ATA_CMD_CHK_POWER: case ATA_CMD_DEV_RESET: @@ -75,12 +80,29 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) case ATA_CMD_STANDBY: case ATA_CMD_STANDBYNOW1: case ATA_CMD_ZAC_MGMT_OUT: - return HISI_SAS_SATA_PROTOCOL_NONDATA; + return HISI_SAS_SATA_PROTOCOL_NONDATA; + + case ATA_CMD_SET_MAX: + switch (fis->features) { + case ATA_SET_MAX_PASSWD: + case ATA_SET_MAX_LOCK: + return HISI_SAS_SATA_PROTOCOL_PIO; + + case ATA_SET_MAX_PASSWD_DMA: + case ATA_SET_MAX_UNLOCK_DMA: + return HISI_SAS_SATA_PROTOCOL_DMA; + + default: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + } + default: + { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; return HISI_SAS_SATA_PROTOCOL_PIO; } + } } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); @@ -116,6 +138,22 @@ int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) } EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); +/* + * This function assumes linkrate mask fits in 8 bits, 
which it + * does for all HW versions supported. + */ +u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) +{ + u16 rate = 0; + int i; + + max -= SAS_LINK_RATE_1_5_GBPS; + for (i = 0; i <= max; i++) + rate |= 1 << (i * 2); + return rate; +} +EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); + static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; @@ -160,11 +198,18 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx) unsigned int index; void *bitmap = hisi_hba->slot_index_tags; - index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count); - if (index >= hisi_hba->slot_index_count) - return -SAS_QUEUE_FULL; + index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, + hisi_hba->last_slot_index + 1); + if (index >= hisi_hba->slot_index_count) { + index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, + 0); + if (index >= hisi_hba->slot_index_count) + return -SAS_QUEUE_FULL; + } hisi_sas_slot_index_set(hisi_hba, index); *slot_idx = index; + hisi_hba->last_slot_index = index; + return 0; } @@ -179,11 +224,11 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { + struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue]; + unsigned long flags; if (task) { struct device *dev = hisi_hba->dev; - struct domain_device *device = task->dev; - struct hisi_sas_device *sas_dev = device->lldd_dev; if (!task->lldd_task) return; @@ -192,50 +237,51 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) - dma_unmap_sg(dev, task->scatter, slot->n_elem, + dma_unmap_sg(dev, task->scatter, + task->num_scatter, task->data_dir); - - if (sas_dev) - atomic64_dec(&sas_dev->running_req); } if (slot->buf) dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); + spin_lock_irqsave(&dq->lock, flags); list_del_init(&slot->entry); + spin_unlock_irqrestore(&dq->lock, flags); slot->buf = NULL; slot->task = NULL; slot->port = NULL; + spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot->idx); + spin_unlock_irqrestore(&hisi_hba->lock, flags); /* slot memory is fully zeroed when it is reused */ } EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); -static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, +static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { - return hisi_hba->hw->prep_smp(hisi_hba, slot); + hisi_hba->hw->prep_smp(hisi_hba, slot); } -static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int is_tmf, - struct hisi_sas_tmf_task *tmf) +static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) { - return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf); + hisi_hba->hw->prep_ssp(hisi_hba, slot); } -static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, +static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { - return hisi_hba->hw->prep_stp(hisi_hba, slot); + hisi_hba->hw->prep_stp(hisi_hba, slot); } -static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, +static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort) { - return hisi_hba->hw->prep_abort(hisi_hba, slot, + hisi_hba->hw->prep_abort(hisi_hba, slot, device_id, abort_flag, 
tag_to_abort); } @@ -255,7 +301,6 @@ static void hisi_sas_slot_abort(struct work_struct *work) struct scsi_lun lun; struct device *dev = hisi_hba->dev; int tag = abort_slot->idx; - unsigned long flags; if (!(task->task_proto & SAS_PROTOCOL_SSP)) { dev_err(dev, "cannot abort slot for non-ssp task\n"); @@ -269,27 +314,29 @@ static void hisi_sas_slot_abort(struct work_struct *work) hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); out: /* Do cleanup for this task */ - spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_task_free(hisi_hba, task, abort_slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); if (task->task_done) task->task_done(task); } -static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq - *dq, int is_tmf, struct hisi_sas_tmf_task *tmf, - int *pass) +static int hisi_sas_task_prep(struct sas_task *task, + struct hisi_sas_dq **dq_pointer, + bool is_tmf, struct hisi_sas_tmf_task *tmf, + int *pass) { - struct hisi_hba *hisi_hba = dq->hisi_hba; struct domain_device *device = task->dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_port *port; struct hisi_sas_slot *slot; struct hisi_sas_cmd_hdr *cmd_hdr_base; struct asd_sas_port *sas_port = device->port; struct device *dev = hisi_hba->dev; - int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; - unsigned long flags; + int dlvry_queue_slot, dlvry_queue, rc, slot_idx; + int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; + unsigned long flags, flags_dq; + struct hisi_sas_dq *dq; + int wr_q_index; if (!sas_port) { struct task_status_struct *ts = &task->task_status; @@ -302,7 +349,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq */ if (device->dev_type != SAS_SATA_DEV) task->task_done(task); - return SAS_PHY_DOWN; + return -ECOMM; } if (DEV_IS_GONE(sas_dev)) { @@ -313,9 +360,11 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq dev_info(dev, "task prep: device %016llx not ready\n", SAS_ADDR(device->sas_addr)); - return SAS_PHY_DOWN; + return -ECOMM; } + *dq_pointer = dq = sas_dev->dq; + port = to_hisi_sas_port(sas_port); if (port && !port->port_attached) { dev_info(dev, "task prep: %s port%d not attach device\n", @@ -323,10 +372,12 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq "SATA/STP" : "SAS", device->port->id); - return SAS_PHY_DOWN; + return -ECOMM; } if (!sas_protocol_ata(task->task_proto)) { + unsigned int req_len, resp_len; + if (task->num_scatter) { n_elem = dma_map_sg(dev, task->scatter, task->num_scatter, task->data_dir); @@ -334,31 +385,74 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq rc = -ENOMEM; goto prep_out; } + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + if (!n_elem_req) { + rc = -ENOMEM; + goto prep_out; + } + req_len = sg_dma_len(&task->smp_task.smp_req); + if (req_len & 0x3) { + rc = -EINVAL; + goto err_out_dma_unmap; + } + n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, + 1, DMA_FROM_DEVICE); + if (!n_elem_resp) { + rc = -ENOMEM; + goto err_out_dma_unmap; + } + resp_len = sg_dma_len(&task->smp_task.smp_resp); + if (resp_len & 0x3) { + rc = -EINVAL; + goto err_out_dma_unmap; + } } } else n_elem = task->num_scatter; + if (n_elem > HISI_SAS_SGE_PAGE_CNT) { + dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", + n_elem); + rc = -EINVAL; + goto err_out_dma_unmap; + } + 
spin_lock_irqsave(&hisi_hba->lock, flags); if (hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, device); else rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); - if (rc) { - spin_unlock_irqrestore(&hisi_hba->lock, flags); - goto err_out; - } spin_unlock_irqrestore(&hisi_hba->lock, flags); - - rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); if (rc) - goto err_out_tag; + goto err_out_dma_unmap; - dlvry_queue = dq->id; - dlvry_queue_slot = dq->wr_point; slot = &hisi_hba->slot_info[slot_idx]; memset(slot, 0, sizeof(struct hisi_sas_slot)); + slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, + GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) { + rc = -ENOMEM; + goto err_out_tag; + } + + spin_lock_irqsave(&dq->lock, flags_dq); + wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); + if (wr_q_index < 0) { + spin_unlock_irqrestore(&dq->lock, flags_dq); + rc = -EAGAIN; + goto err_out_buf; + } + + list_add_tail(&slot->delivery, &dq->list); + spin_unlock_irqrestore(&dq->lock, flags_dq); + + dlvry_queue = dq->id; + dlvry_queue_slot = wr_q_index; + slot->idx = slot_idx; slot->n_elem = n_elem; slot->dlvry_queue = dlvry_queue; @@ -367,100 +461,94 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; slot->task = task; slot->port = port; + slot->tmf = tmf; + slot->is_internal = is_tmf; task->lldd_task = slot; INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); - slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, - GFP_ATOMIC, &slot->buf_dma); - if (!slot->buf) { - rc = -ENOMEM; - goto err_out_slot_buf; - } memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); switch (task->task_proto) { case SAS_PROTOCOL_SMP: - rc = hisi_sas_task_prep_smp(hisi_hba, slot); + hisi_sas_task_prep_smp(hisi_hba, slot); break; case SAS_PROTOCOL_SSP: - rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf); + hisi_sas_task_prep_ssp(hisi_hba, slot); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = hisi_sas_task_prep_ata(hisi_hba, slot); + hisi_sas_task_prep_ata(hisi_hba, slot); break; default: dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", task->task_proto); - rc = -EINVAL; break; } - if (rc) { - dev_err(dev, "task prep: rc = 0x%x\n", rc); - goto err_out_buf; - } - - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock_irqsave(&dq->lock, flags); list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock_irqrestore(&dq->lock, flags); spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - dq->slot_prep = slot; - - atomic64_inc(&sas_dev->running_req); ++(*pass); + slot->ready = 1; return 0; err_out_buf: dma_pool_free(hisi_hba->buffer_pool, slot->buf, - slot->buf_dma); -err_out_slot_buf: - /* Nothing to be done */ + slot->buf_dma); err_out_tag: spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot_idx); spin_unlock_irqrestore(&hisi_hba->lock, flags); -err_out: - dev_err(dev, "task prep: failed[%d]!\n", rc); - if (!sas_protocol_ata(task->task_proto)) - if (n_elem) - dma_unmap_sg(dev, task->scatter, n_elem, - task->data_dir); +err_out_dma_unmap: + if (!sas_protocol_ata(task->task_proto)) { + if (task->num_scatter) { + 
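/* Error unwind: undo only whichever mapping was set up above, the
 * scatterlist for non-ATA I/O or the SMP request/response pair. */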
dma_unmap_sg(dev, task->scatter, task->num_scatter, + task->data_dir); + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + if (n_elem_req) + dma_unmap_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + if (n_elem_resp) + dma_unmap_sg(dev, &task->smp_task.smp_resp, + 1, DMA_FROM_DEVICE); + } + } prep_out: + dev_err(dev, "task prep: failed[%d]!\n", rc); return rc; } static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, - int is_tmf, struct hisi_sas_tmf_task *tmf) + bool is_tmf, struct hisi_sas_tmf_task *tmf) { u32 rc; u32 pass = 0; unsigned long flags; struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); struct device *dev = hisi_hba->dev; - struct domain_device *device = task->dev; - struct hisi_sas_device *sas_dev = device->lldd_dev; - struct hisi_sas_dq *dq = sas_dev->dq; + struct hisi_sas_dq *dq = NULL; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; /* protect task_prep and start_delivery sequence */ - spin_lock_irqsave(&dq->lock, flags); - rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass); + rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); if (rc) dev_err(dev, "task exec: failed[%d]!\n", rc); - if (likely(pass)) + if (likely(pass)) { + spin_lock_irqsave(&dq->lock, flags); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock_irqrestore(&dq->lock, flags); + } return rc; } @@ -511,10 +599,12 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = NULL; unsigned long flags; + int last = hisi_hba->last_dev_id; + int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; int i; spin_lock_irqsave(&hisi_hba->lock, flags); - for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { int queue = i % hisi_hba->queue_count; struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; @@ -529,18 +619,57 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } + i++; } + hisi_hba->last_dev_id = i; spin_unlock_irqrestore(&hisi_hba->lock, flags); return sas_dev; } +#define HISI_SAS_SRST_ATA_DISK_CNT 3 +static int hisi_sas_init_device(struct domain_device *device) +{ + int rc = TMF_RESP_FUNC_COMPLETE; + struct scsi_lun lun; + struct hisi_sas_tmf_task tmf_task; + int retry = HISI_SAS_SRST_ATA_DISK_CNT; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + + switch (device->dev_type) { + case SAS_END_DEVICE: + int_to_scsilun(0, &lun); + + tmf_task.tmf = TMF_CLEAR_TASK_SET; + rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, + &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) + hisi_sas_release_task(hisi_hba, device); + break; + case SAS_SATA_DEV: + case SAS_SATA_PM: + case SAS_SATA_PM_PORT: + case SAS_SATA_PENDING: + while (retry-- > 0) { + rc = hisi_sas_softreset_ata_disk(device); + if (!rc) + break; + } + break; + default: + break; + } + + return rc; +} + static int hisi_sas_dev_found(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev; struct device *dev = hisi_hba->dev; + int rc; if (hisi_hba->hw->alloc_dev) sas_dev = hisi_hba->hw->alloc_dev(device); @@ -563,10 +692,8 @@ static int hisi_sas_dev_found(struct domain_device *device) for (phy_no = 0; phy_no < phy_num; phy_no++) { phy = 
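/*
 * Note on hisi_sas_alloc_dev() above: the free-entry search now starts
 * at last_dev_id + 1 and treats the device table as a ring (the loop
 * body increments i and the for-clause wraps it with
 * "i %= HISI_SAS_MAX_DEVICES"), so device ids are handed out
 * round-robin and spread across the hardware queues via
 * "i % queue_count".
 */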
&parent_dev->ex_dev.ex_phy[phy_no]; if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(device->sas_addr)) { - sas_dev->attached_phy = phy_no; + SAS_ADDR(device->sas_addr)) break; - } } if (phy_no == phy_num) { @@ -574,14 +701,25 @@ static int hisi_sas_dev_found(struct domain_device *device) "dev:%016llx at ex:%016llx\n", SAS_ADDR(device->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - return -EINVAL; + rc = -EINVAL; + goto err_out; } } + dev_info(dev, "dev[%d:%x] found\n", + sas_dev->device_id, sas_dev->dev_type); + + rc = hisi_sas_init_device(device); + if (rc) + goto err_out; return 0; + +err_out: + hisi_sas_dev_gone(device); + return rc; } -static int hisi_sas_slave_configure(struct scsi_device *sdev) +int hisi_sas_slave_configure(struct scsi_device *sdev) { struct domain_device *dev = sdev_to_domain_dev(sdev); int ret = sas_slave_configure(sdev); @@ -593,15 +731,17 @@ static int hisi_sas_slave_configure(struct scsi_device *sdev) return 0; } +EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); -static void hisi_sas_scan_start(struct Scsi_Host *shost) +void hisi_sas_scan_start(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); hisi_hba->hw->phys_init(hisi_hba); } +EXPORT_SYMBOL_GPL(hisi_sas_scan_start); -static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) +int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct hisi_hba *hisi_hba = shost_priv(shost); struct sas_ha_struct *sha = &hisi_hba->sha; @@ -613,11 +753,12 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) sas_drain_work(sha); return 1; } +EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); static void hisi_sas_phyup_work(struct work_struct *work) { struct hisi_sas_phy *phy = - container_of(work, struct hisi_sas_phy, phyup_ws); + container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; @@ -626,13 +767,42 @@ static void hisi_sas_phyup_work(struct work_struct *work) hisi_sas_bytes_dmaed(hisi_hba, phy_no); } +static void hisi_sas_linkreset_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); +} + +static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { + [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, + [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, +}; + +bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, + enum hisi_sas_phy_event event) +{ + struct hisi_hba *hisi_hba = phy->hisi_hba; + + if (WARN_ON(event >= HISI_PHYES_NUM)) + return false; + + return queue_work(hisi_hba->wq, &phy->works[event]); +} +EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); + static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i; phy->hisi_hba = hisi_hba; phy->port = NULL; + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); init_timer(&phy->timer); sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 
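/*
 * The per-phy work items are now table driven: works[] is indexed by
 * enum hisi_sas_phy_event, populated below from hisi_sas_phye_fns[],
 * and hisi_sas_notify_phy_event() bounds-checks the event before
 * queueing it, so adding a new phy event only means extending the
 * enum and the function table.
 */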
1 : 0; sas_phy->class = SAS; @@ -648,7 +818,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; sas_phy->lldd_phy = phy; - INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work); + for (i = 0; i < HISI_PHYES_NUM; i++) + INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); } static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) @@ -703,7 +874,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba, hisi_sas_do_release_task(hisi_hba, slot->task, slot); } -static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) +void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) { struct hisi_sas_device *sas_dev; struct domain_device *device; @@ -720,6 +891,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) hisi_sas_release_task(hisi_hba, device); } } +EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, struct domain_device *device) @@ -734,17 +906,21 @@ static void hisi_sas_dev_gone(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; - dev_info(dev, "found dev[%d:%x] is gone\n", + dev_info(dev, "dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); - hisi_sas_internal_task_abort(hisi_hba, device, + if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { + hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); - hisi_sas_dereg_device(hisi_hba, device); + hisi_sas_dereg_device(hisi_hba, device); + + hisi_hba->hw->clear_itct(hisi_hba, sas_dev); + device->lldd_dev = NULL; + } - hisi_hba->hw->free_device(hisi_hba, sas_dev); - device->lldd_dev = NULL; - memset(sas_dev, 0, sizeof(*sas_dev)); + if (hisi_hba->hw->free_device) + hisi_hba->hw->free_device(sas_dev); sas_dev->dev_type = SAS_PHY_UNUSED; } @@ -753,6 +929,33 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) return hisi_sas_task_exec(task, gfp_flags, 0, NULL); } +static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, + struct sas_phy_linkrates *r) +{ + struct sas_phy_linkrates _r; + + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + enum sas_linkrate min, max; + + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = sas_phy->phy->maximum_linkrate; + min = r->minimum_linkrate; + } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { + max = r->maximum_linkrate; + min = sas_phy->phy->minimum_linkrate; + } else + return; + + _r.maximum_linkrate = max; + _r.minimum_linkrate = min; + + hisi_hba->hw->phy_disable(hisi_hba, phy_no); + msleep(100); + hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); + hisi_hba->hw->phy_start(hisi_hba, phy_no); +} + static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { @@ -776,7 +979,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_SET_LINK_RATE: - hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); + hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); break; case PHY_FUNC_GET_EVENTS: if (hisi_hba->hw->get_events) { @@ -813,6 +1016,7 @@ static void hisi_sas_tmf_timedout(unsigned long data) #define TASK_TIMEOUT 20 #define TASK_RETRY 3 +#define INTERNAL_ABORT_TIMEOUT 6 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, void *parameter, u32 para_len, struct hisi_sas_tmf_task *tmf) @@ -860,12 +1064,13 @@ static int 
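/*
 * hisi_sas_phy_set_linkrate() above applies a rate change with a
 * disable / settle (100ms) / reprogram / re-enable sequence, letting
 * the phy renegotiate with the new mask; the v1/v2 hw callbacks no
 * longer issue a hard reset themselves.
 */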
hisi_sas_exec_internal_tmf_task(struct domain_device *device, if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { struct hisi_sas_slot *slot = task->lldd_task; - dev_err(dev, "abort tmf: TMF task timeout\n"); + dev_err(dev, "abort tmf: TMF task timeout and not done\n"); if (slot) slot->task = NULL; goto ex_err; - } + } else + dev_err(dev, "abort tmf: TMF task timeout\n"); } if (task->task_status.resp == SAS_TASK_COMPLETE && @@ -938,7 +1143,6 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int s = sizeof(struct host_to_dev_fis); - unsigned long flags; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -963,11 +1167,8 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) dev_err(dev, "ata disk reset failed\n"); } - if (rc == TMF_RESP_FUNC_COMPLETE) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } return rc; } @@ -986,27 +1187,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, sizeof(ssp_task), tmf); } -static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba, - struct asd_sas_port *sas_port, enum sas_linkrate linkrate) +static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) { - struct hisi_sas_device *sas_dev; - struct domain_device *device; + u32 state = hisi_hba->hw->get_phys_state(hisi_hba); int i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { - sas_dev = &hisi_hba->devices[i]; - device = sas_dev->sas_device; + struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; + struct domain_device *device = sas_dev->sas_device; + struct asd_sas_port *sas_port; + struct hisi_sas_port *port; + struct hisi_sas_phy *phy = NULL; + struct asd_sas_phy *sas_phy; + if ((sas_dev->dev_type == SAS_PHY_UNUSED) - || !device || (device->port != sas_port)) + || !device || !device->port) continue; - hisi_hba->hw->free_device(hisi_hba, sas_dev); + sas_port = device->port; + port = to_hisi_sas_port(sas_port); + + list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) + if (state & BIT(sas_phy->id)) { + phy = sas_phy->lldd_phy; + break; + } + + if (phy) { + port->id = phy->port_id; - /* Update linkrate of directly attached device. */ - if (!device->parent) - device->linkrate = linkrate; + /* Update linkrate of directly attached device. 
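After a controller reset the phy may have renegotiated at a different speed, so the rate is read back from a phy of the port that came up again before the ITCT is rebuilt; ports with no phy back up fall through to id 0xff below.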
*/ + if (!device->parent) + device->linkrate = phy->sas_phy.linkrate; - hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + } else + port->id = 0xff; } } @@ -1021,21 +1237,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = to_hisi_sas_port(sas_port); bool do_port_check = !!(_sas_port != sas_port); if (!sas_phy->phy->enabled) continue; /* Report PHY state change to libsas */ - if (state & (1 << phy_no)) { - if (do_port_check && sas_port) { + if (state & BIT(phy_no)) { + if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; _sas_port = sas_port; - port->id = phy->port_id; - hisi_sas_refresh_port_id(hisi_hba, - sas_port, sas_phy->linkrate); if (DEV_IS_EXPANDER(dev->dev_type)) sas_ha->notify_port_event(sas_phy, @@ -1046,8 +1258,98 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, hisi_sas_phy_down(hisi_hba, phy_no, 0); } +} - drain_workqueue(hisi_hba->shost->work_q); +static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) +{ + struct hisi_sas_device *sas_dev; + struct domain_device *device; + int i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + sas_dev = &hisi_hba->devices[i]; + device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) + continue; + + hisi_sas_init_device(device); + } +} + +static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, + struct asd_sas_port *sas_port, + struct domain_device *device) +{ + struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 }; + struct ata_port *ap = device->sata_dev.ap; + struct device *dev = hisi_hba->dev; + int s = sizeof(struct host_to_dev_fis); + int rc = TMF_RESP_FUNC_FAILED; + struct asd_sas_phy *sas_phy; + struct ata_link *link; + u8 fis[20] = {0}; + u32 state; + + state = hisi_hba->hw->get_phys_state(hisi_hba); + list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { + if (!(state & BIT(sas_phy->id))) + continue; + + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + tmf_task.phy_id = sas_phy->id; + hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); + rc = hisi_sas_exec_internal_tmf_task(device, fis, s, + &tmf_task); + if (rc != TMF_RESP_FUNC_COMPLETE) { + dev_err(dev, "phy%d ata reset failed rc=%d\n", + sas_phy->id, rc); + break; + } + } + } +} + +static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int port_no, rc, i; + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; + struct domain_device *device = sas_dev->sas_device; + + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) + continue; + + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) + dev_err(dev, "STP reject: abort dev failed %d\n", rc); + } + + for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { + struct hisi_sas_port *port = &hisi_hba->port[port_no]; + struct asd_sas_port *sas_port = &port->sas_port; + struct domain_device *port_dev = sas_port->port_dev; + struct domain_device *device; + + if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type)) + continue; + + /* Try to find a SATA device */ + list_for_each_entry(device, &sas_port->dev_list, + dev_list_node) { + if (dev_is_sata(device)) { 
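/* One SATA device per expander port is enough here:
 * hisi_sas_send_ata_reset_each_phy() issues SRST on every
 * active phy of the port, so stop scanning after the first hit. */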
+ hisi_sas_send_ata_reset_each_phy(hisi_hba, + sas_port, + device); + break; + } + } + } } static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) @@ -1055,7 +1357,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) struct device *dev = hisi_hba->dev; struct Scsi_Host *shost = hisi_hba->shost; u32 old_state, state; - unsigned long flags; int rc; if (!hisi_hba->hw->soft_reset) @@ -1064,35 +1365,41 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) return -1; - dev_dbg(dev, "controller resetting...\n"); + dev_info(dev, "controller resetting...\n"); old_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); + hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); + + if (timer_pending(&hisi_hba->timer)) + del_timer_sync(&hisi_hba->timer); + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); rc = hisi_hba->hw->soft_reset(hisi_hba); if (rc) { dev_warn(dev, "controller reset failed (%d)\n", rc); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); goto out; } - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_release_tasks(hisi_hba); - spin_unlock_irqrestore(&hisi_hba->lock, flags); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); /* Init and wait for PHYs to come up and all libsas event finished. */ hisi_hba->hw->phys_init(hisi_hba); msleep(1000); - drain_workqueue(hisi_hba->wq); - drain_workqueue(shost->work_q); + hisi_sas_refresh_port_id(hisi_hba); + + if (hisi_hba->reject_stp_links_msk) + hisi_sas_terminate_stp_reject(hisi_hba); + hisi_sas_reset_init_all_devices(hisi_hba); + scsi_unblock_requests(shost); state = hisi_hba->hw->get_phys_state(hisi_hba); hisi_sas_rescan_topology(hisi_hba, old_state, state); - dev_dbg(dev, "controller reset complete\n"); + dev_info(dev, "controller reset complete\n"); out: - scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); return rc; @@ -1104,20 +1411,25 @@ static int hisi_sas_abort_task(struct sas_task *task) struct hisi_sas_tmf_task tmf_task; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; - struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); - struct device *dev = hisi_hba->dev; + struct hisi_hba *hisi_hba; + struct device *dev; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; - if (!sas_dev) { - dev_warn(dev, "Device has been removed\n"); + if (!sas_dev) return TMF_RESP_FUNC_FAILED; - } + hisi_hba = dev_to_hisi_hba(task->dev); + dev = hisi_hba->dev; + + spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); sas_dev->dev_status = HISI_SAS_DEV_EH; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { @@ -1135,6 +1447,11 @@ static int hisi_sas_abort_task(struct sas_task *task) rc2 = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_CMD, tag); + if (rc2 < 0) { + dev_err(dev, "abort task: internal abort (%d)\n", rc2); + return TMF_RESP_FUNC_FAILED; + } + /* * If the TMF finds that the IO is not in the device and also * the internal abort does not succeed, then it is safe to @@ -1143,17 +1460,18 @@ static int hisi_sas_abort_task(struct sas_task *task) * will have already been completed */ if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != 
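/*
 * (Illustrative summary of hisi_sas_controller_reset() above: block
 * the host, give in-flight commands up to ~5s to finish, soft-reset
 * with HISI_SAS_REJECT_CMD_BIT set, re-init the phys, then refresh
 * port ids and rescan the topology instead of draining workqueues.)
 */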
TMF_RESP_FUNC_SUCC) { - if (task->lldd_task) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (task->lldd_task) hisi_sas_do_release_task(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } } } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { - hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "abort task: internal abort failed\n"); + goto out; + } hisi_sas_dereg_device(hisi_hba, device); rc = hisi_sas_softreset_ata_disk(device); } @@ -1164,11 +1482,9 @@ static int hisi_sas_abort_task(struct sas_task *task) rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_CMD, tag); - if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && + task->lldd_task) hisi_sas_do_release_task(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } } out: @@ -1179,12 +1495,25 @@ out: static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) { + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; struct hisi_sas_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "abort task set: internal abort rc=%d\n", rc); + return TMF_RESP_FUNC_FAILED; + } + hisi_sas_dereg_device(hisi_hba, device); + tmf_task.tmf = TMF_ABORT_TASK_SET; rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) + hisi_sas_release_task(hisi_hba, device); + return rc; } @@ -1201,12 +1530,39 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) { - struct sas_phy *phy = sas_get_local_phy(device); + struct sas_phy *local_phy = sas_get_local_phy(device); int rc, reset_type = (device->dev_type == SAS_SATA_DEV || (device->tproto & SAS_PROTOCOL_STP)) ? 
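/* reset_type: SATA/STP targets get a link reset (0), plain SAS
 * targets a hard reset (1), per sas_phy_reset() semantics */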
0 : 1; - rc = sas_phy_reset(phy, reset_type); - sas_put_local_phy(phy); - msleep(2000); + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = container_of(sas_phy, + struct hisi_sas_phy, sas_phy); + DECLARE_COMPLETION_ONSTACK(phyreset); + + if (scsi_is_sas_phy_local(local_phy)) { + phy->in_reset = 1; + phy->reset_completion = &phyreset; + } + + rc = sas_phy_reset(local_phy, reset_type); + sas_put_local_phy(local_phy); + + if (scsi_is_sas_phy_local(local_phy)) { + int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); + phy->reset_completion = NULL; + phy->in_reset = 0; + spin_unlock_irqrestore(&phy->lock, flags); + + /* report PHY down if timed out */ + if (!ret) + hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); + } else + msleep(2000); + return rc; } @@ -1214,24 +1570,26 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - unsigned long flags; + struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; if (sas_dev->dev_status != HISI_SAS_DEV_EH) return TMF_RESP_FUNC_FAILED; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; - hisi_sas_internal_task_abort(hisi_hba, device, + rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); + return TMF_RESP_FUNC_FAILED; + } hisi_sas_dereg_device(hisi_hba, device); rc = hisi_sas_debug_I_T_nexus_reset(device); - if (rc == TMF_RESP_FUNC_COMPLETE) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } + return rc; } @@ -1240,7 +1598,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; - unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; sas_dev->dev_status = HISI_SAS_DEV_EH; @@ -1250,29 +1607,33 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) /* Clear internal IO and then hardreset */ rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); - if (rc == TMF_RESP_FUNC_FAILED) + if (rc < 0) { + dev_err(dev, "lu_reset: internal abort failed\n"); goto out; + } hisi_sas_dereg_device(hisi_hba, device); phy = sas_get_local_phy(device); rc = sas_phy_reset(phy, 1); - if (rc == 0) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (rc == 0) hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } sas_put_local_phy(phy); } else { struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "lu_reset: internal abort failed\n"); + goto out; + } + hisi_sas_dereg_device(hisi_hba, device); + rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); - if (rc == TMF_RESP_FUNC_COMPLETE) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } } out: if (rc != TMF_RESP_FUNC_COMPLETE) @@ -1284,8 +1645,14 @@ out: static int 
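/*
 * clear_nexus_ha now funnels through the reset work: a
 * HISI_SAS_DECLARE_RST_WORK_ON_STACK() instance is queued to the
 * driver workqueue and waited on, and r.done reflects whether
 * hisi_sas_sync_rst_work_handler() (added below) saw the controller
 * reset succeed.
 */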
hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) { struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); + + queue_work(hisi_hba->wq, &r.work); + wait_for_completion(r.completion); + if (r.done) + return TMF_RESP_FUNC_COMPLETE; - return hisi_sas_controller_reset(hisi_hba); + return TMF_RESP_FUNC_FAILED; } static int hisi_sas_query_task(struct sas_task *task) @@ -1336,7 +1703,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct hisi_sas_cmd_hdr *cmd_hdr_base; struct hisi_sas_dq *dq = sas_dev->dq; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; - unsigned long flags, flags_dq; + unsigned long flags, flags_dq = 0; + int wr_q_index; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; @@ -1355,16 +1723,28 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, } spin_unlock_irqrestore(&hisi_hba->lock, flags); - spin_lock_irqsave(&dq->lock, flags_dq); - rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); - if (rc) + slot = &hisi_hba->slot_info[slot_idx]; + memset(slot, 0, sizeof(struct hisi_sas_slot)); + + slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, + GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) { + rc = -ENOMEM; goto err_out_tag; + } - dlvry_queue = dq->id; - dlvry_queue_slot = dq->wr_point; + spin_lock_irqsave(&dq->lock, flags_dq); + wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); + if (wr_q_index < 0) { + spin_unlock_irqrestore(&dq->lock, flags_dq); + rc = -EAGAIN; + goto err_out_buf; + } + list_add_tail(&slot->delivery, &dq->list); + spin_unlock_irqrestore(&dq->lock, flags_dq); - slot = &hisi_hba->slot_info[slot_idx]; - memset(slot, 0, sizeof(struct hisi_sas_slot)); + dlvry_queue = dq->id; + dlvry_queue_slot = wr_q_index; slot->idx = slot_idx; slot->n_elem = n_elem; @@ -1374,49 +1754,36 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; slot->task = task; slot->port = port; + slot->is_internal = true; task->lldd_task = slot; - slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, - GFP_ATOMIC, &slot->buf_dma); - if (!slot->buf) { - rc = -ENOMEM; - goto err_out_tag; - } - memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); - rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id, + hisi_sas_task_prep_abort(hisi_hba, slot, device_id, abort_flag, task_tag); - if (rc) - goto err_out_buf; - spin_lock_irqsave(&hisi_hba->lock, flags); - list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&hisi_hba->lock, flags); spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - dq->slot_prep = slot; - - atomic64_inc(&sas_dev->running_req); - + slot->ready = 1; /* send abort command to the chip */ + spin_lock_irqsave(&dq->lock, flags); + list_add_tail(&slot->entry, &sas_dev->list); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags_dq); + spin_unlock_irqrestore(&dq->lock, flags); return 0; err_out_buf: dma_pool_free(hisi_hba->buffer_pool, slot->buf, - slot->buf_dma); + slot->buf_dma); err_out_tag: spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot_idx); spin_unlock_irqrestore(&hisi_hba->lock, flags); - spin_unlock_irqrestore(&dq->lock, flags_dq); err_out: dev_err(dev, 
"internal abort task prep: failed[%d]!\n", rc); @@ -1442,8 +1809,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct device *dev = hisi_hba->dev; int res; + /* + * The interface is not realized means this HW don't support internal + * abort, or don't need to do internal abort. Then here, we return + * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that + * the internal abort has been executed and returned CQ. + */ if (!hisi_hba->hw->prep_abort) - return -EOPNOTSUPP; + return TMF_RESP_FUNC_FAILED; task = sas_alloc_slow_task(GFP_KERNEL); if (!task) @@ -1454,7 +1827,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->task_done = hisi_sas_task_done; task->slow_task->timer.data = (unsigned long)task; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); + task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ; add_timer(&task->slow_task->timer); res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, @@ -1475,9 +1848,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, if (slot) slot->task = NULL; - dev_err(dev, "internal task abort: timeout.\n"); + dev_err(dev, "internal task abort: timeout and not done.\n"); + res = -EIO; goto exit; - } + } else + dev_err(dev, "internal task abort: timeout.\n"); } if (task->task_status.resp == SAS_TASK_COMPLETE && @@ -1509,6 +1884,10 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) hisi_sas_port_notify_formed(sas_phy); } +static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy) +{ +} + static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { @@ -1533,6 +1912,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha = &hisi_hba->sha; + struct device *dev = hisi_hba->dev; if (rdy) { /* Phy down but ready */ @@ -1541,6 +1921,10 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) } else { struct hisi_sas_port *port = phy->port; + if (phy->in_reset) { + dev_info(dev, "ignore flutter phy%d down\n", phy_no); + return; + } /* Phy down and not ready */ sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); sas_phy_disconnected(sas_phy); @@ -1579,30 +1963,7 @@ struct device_attribute *host_attrs[] = { &dev_attr_phy_event_threshold, NULL, }; - -static struct scsi_host_template _hisi_sas_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .target_alloc = sas_target_alloc, - .slave_configure = hisi_sas_slave_configure, - .scan_finished = hisi_sas_scan_finished, - .scan_start = hisi_sas_scan_start, - .change_queue_depth = sas_change_queue_depth, - .bios_param = sas_bios_param, - .can_queue = 1, - .this_id = -1, - .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_target_reset_handler = sas_eh_target_reset_handler, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, - .shost_attrs = host_attrs, -}; -struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht; -EXPORT_SYMBOL_GPL(hisi_sas_sht); +EXPORT_SYMBOL_GPL(host_attrs); static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_dev_found = hisi_sas_dev_found, @@ -1611,13 +1972,14 @@ static struct sas_domain_function_template 
hisi_sas_transport_ops = { .lldd_control_phy = hisi_sas_control_phy, .lldd_abort_task = hisi_sas_abort_task, .lldd_abort_task_set = hisi_sas_abort_task_set, + .lldd_write_gpio = hisi_sas_write_gpio, .lldd_clear_aca = hisi_sas_clear_aca, .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, .lldd_lu_reset = hisi_sas_lu_reset, .lldd_query_task = hisi_sas_query_task, .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, .lldd_port_formed = hisi_sas_port_formed, - .lldd_write_gpio = hisi_sas_write_gpio, + .lldd_port_deformed = hisi_sas_port_deformed, }; void hisi_sas_init_mem(struct hisi_hba *hisi_hba) @@ -1678,6 +2040,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) cq->hisi_hba = hisi_hba; /* Delivery queue structure */ + spin_lock_init(&dq->lock); + INIT_LIST_HEAD(&dq->list); dq->id = i; dq->hisi_hba = hisi_hba; @@ -1702,13 +2066,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) goto err_out; s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); - hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma, + hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma, GFP_KERNEL); if (!hisi_hba->itct) goto err_out; - memset(hisi_hba->itct, 0, s); - hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, sizeof(struct hisi_sas_slot), GFP_KERNEL); @@ -1824,6 +2186,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work) } EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); +void hisi_sas_sync_rst_work_handler(struct work_struct *work) +{ + struct hisi_sas_rst *rst = + container_of(work, struct hisi_sas_rst, work); + + if (!hisi_sas_controller_reset(rst->hisi_hba)) + rst->done = true; + complete(rst->completion); +} +EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); + int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; @@ -1900,7 +2273,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; - shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); + shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { dev_err(dev, "scsi host alloc failed\n"); return NULL; @@ -1949,19 +2322,8 @@ err_out: return NULL; } -void hisi_sas_init_add(struct hisi_hba *hisi_hba) -{ - int i; - - for (i = 0; i < hisi_hba->n_phy; i++) - memcpy(&hisi_hba->phy[i].dev_sas_addr, - hisi_hba->sas_addr, - SAS_ADDR_SIZE); -} -EXPORT_SYMBOL_GPL(hisi_sas_init_add); - int hisi_sas_probe(struct platform_device *pdev, - const struct hisi_sas_hw *hw) + const struct hisi_sas_hw *hw) { struct Scsi_Host *shost; struct hisi_hba *hisi_hba; @@ -2013,8 +2375,6 @@ int hisi_sas_probe(struct platform_device *pdev, sha->sas_port[i] = &hisi_hba->port[i].sas_port; } - hisi_sas_init_add(hisi_hba); - rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha; @@ -2046,6 +2406,9 @@ int hisi_sas_remove(struct platform_device *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->core.shost; + if (timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); + sas_unregister_ha(sha); sas_remove_host(sha->core.shost); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 00b5ee4e49d3..8b11ab1352f5 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); } -static void free_device_v1_hw(struct hisi_hba *hisi_hba, +static 
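/*
 * free_device_v*_hw is split in two below: clear_itct_v*_hw only
 * invalidates the device's ITCT entry, while the now-optional
 * hw->free_device hook keeps SoC-specific cleanup such as the v2
 * SATA-index bitmap workaround.
 */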
void clear_itct_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { u64 dev_id = sas_dev->device_id; @@ -651,8 +651,10 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) dev_err(dev, "De-reset failed\n"); return -EIO; } - } else + } else { dev_warn(dev, "no reset method\n"); + return -EINVAL; + } return 0; } @@ -853,39 +855,12 @@ static enum sas_linkrate phy_get_max_linkrate_v1_hw(void) static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { - u32 prog_phy_link_rate = - hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - int i; - enum sas_linkrate min, max; - u32 rate_mask = 0; - - if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = sas_phy->phy->maximum_linkrate; - min = r->minimum_linkrate; - } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = r->maximum_linkrate; - min = sas_phy->phy->minimum_linkrate; - } else - return; - - sas_phy->phy->maximum_linkrate = max; - sas_phy->phy->minimum_linkrate = min; - - min -= SAS_LINK_RATE_1_5_GBPS; - max -= SAS_LINK_RATE_1_5_GBPS; - - for (i = 0; i <= max; i++) - rate_mask |= 1 << (i * 2); - - prog_phy_link_rate &= ~0xff; - prog_phy_link_rate |= rate_mask; + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = 0x800; + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, - prog_phy_link_rate); - - phy_hard_reset_v1_hw(hisi_hba, phy_no); + prog_phy_link_rate); } static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) @@ -919,37 +894,45 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) return -EAGAIN; } - return 0; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; + + return w; } +/* DQ lock must be taken here */ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; - int dlvry_queue = dq->slot_prep->dlvry_queue; - int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; + struct hisi_sas_slot *s, *s1; + struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp, count = 0; + + dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + count++; + wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + list_del(&s->delivery); + } - dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; - hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), - dq->wr_point); + if (!count) + return; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } -static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, +static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); - struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; - if (n_elem > HISI_SAS_SGE_PAGE_CNT) { - dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", - n_elem); - return -EINVAL; - } - for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; @@ -962,48 +945,25 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); - - return 0; } -static int prep_smp_v1_hw(struct hisi_hba *hisi_hba, 
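/*
 * From here the prep_* callbacks become void: the DMA mapping and the
 * SGE-count/alignment checks that could fail were hoisted into
 * hisi_sas_task_prep(), so slot preparation itself can no longer
 * error out.
 */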
+static void prep_smp_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; - struct device *dev = hisi_hba->dev; struct hisi_sas_port *port = slot->port; - struct scatterlist *sg_req, *sg_resp; + struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; - unsigned int req_len, resp_len; - int elem, rc; + unsigned int req_len; - /* - * DMA-map SMP request, response buffers - */ /* req */ sg_req = &task->smp_task.smp_req; - elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); - if (!elem) - return -ENOMEM; req_len = sg_dma_len(sg_req); req_dma_addr = sg_dma_address(sg_req); - /* resp */ - sg_resp = &task->smp_task.smp_resp; - elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out_req; - } - resp_len = sg_dma_len(sg_resp); - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; - goto err_out_resp; - } - /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | @@ -1023,21 +983,10 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba, hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - - return 0; - -err_out_resp: - dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); -err_out_req: - dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, - DMA_TO_DEVICE); - return rc; } -static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int is_tmf, - struct hisi_sas_tmf_task *tmf) +static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; @@ -1046,7 +995,8 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; - int has_data = 0, rc, priority = is_tmf; + struct hisi_sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; u8 *buf_cmd, fburst = 0; u32 dw1, dw2; @@ -1060,7 +1010,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF; - if (is_tmf) { + if (tmf) { dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF; } else { switch (scsi_cmnd->sc_data_direction) { @@ -1081,7 +1031,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); - if (is_tmf) { + if (tmf) { dw2 = ((sizeof(struct ssp_tmf_iu) + sizeof(struct ssp_frame_hdr)+3)/4) << CMD_HDR_CFL_OFF; @@ -1095,12 +1045,9 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); - if (has_data) { - rc = prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, + if (has_data) + prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); - if (rc) - return rc; - } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -1115,7 +1062,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, hdr->dw2 = cpu_to_le32(dw2); memcpy(buf_cmd, &task->ssp_task.LUN, 8); - if (!is_tmf) { + if (!tmf) { buf_cmd[9] = fburst | task->ssp_task.task_attr | (task->ssp_task.task_prio << 3); memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, @@ -1134,8 +1081,6 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, 
break; } } - - return 0; } /* by default, task resp is complete */ @@ -1407,9 +1352,6 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, } out: - if (sas_dev) - atomic64_dec(&sas_dev->running_req); - hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; @@ -1431,6 +1373,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; irqreturn_t res = IRQ_HANDLED; + unsigned long flags; irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { @@ -1482,7 +1425,14 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; - queue_work(hisi_hba->wq, &phy->phyup_ws); + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + + spin_lock_irqsave(&phy->lock, flags); + if (phy->reset_completion) { + phy->in_reset = 0; + complete(phy->reset_completion); + } + spin_unlock_irqrestore(&phy->lock, flags); end: hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, @@ -1846,11 +1796,33 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) return 0; } +static struct scsi_host_template sht_v1_hw = { + .name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, + .shost_attrs = host_attrs, +}; + static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, .setup_itct = setup_itct_v1_hw, .sl_notify = sl_notify_v1_hw, - .free_device = free_device_v1_hw, + .clear_itct = clear_itct_v1_hw, .prep_smp = prep_smp_v1_hw, .prep_ssp = prep_ssp_v1_hw, .get_free_slot = get_free_slot_v1_hw, @@ -1865,6 +1837,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = { .get_wideport_bitmap = get_wideport_bitmap_v1_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), + .sht = &sht_v1_hw, }; static int hisi_sas_v1_probe(struct platform_device *pdev) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 03a22bf8ed39..12225f4c9362 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -144,6 +144,7 @@ #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 +#define CQE_SEND_CNT 0x248 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 #define DLVRY_Q_0_DEPTH 0x268 @@ -240,7 +241,12 @@ #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 +#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 +#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 +#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 #define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 #define 
CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) @@ -290,6 +296,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -401,6 +411,17 @@ struct hisi_sas_err_record_v2 { __le32 dma_rx_err_type; }; +struct signal_attenuation_s { + u32 de_emphasis; + u32 preshoot; + u32 boost; +}; + +struct sig_atten_lu_s { + const struct signal_attenuation_s *att; + u32 sas_phy_ctrl; +}; + static const struct hisi_sas_hw_error one_bit_ecc_errors[] = { { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), @@ -952,7 +973,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void free_device_v2_hw(struct hisi_hba *hisi_hba, +static void clear_itct_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); @@ -963,10 +984,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, sas_dev->completion = &completion; - /* SoC bug workaround */ - if (dev_is_sata(sas_dev->sas_device)) - clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); - /* clear the itct interrupt state */ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) hisi_sas_write32(hisi_hba, ENT_INT_SRC3, @@ -981,6 +998,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, } } +static void free_device_v2_hw(struct hisi_sas_device *sas_dev) +{ + struct hisi_hba *hisi_hba = sas_dev->hisi_hba; + + /* SoC bug workaround */ + if (dev_is_sata(sas_dev->sas_device)) + clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); +} + static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) { int i, reset_val; @@ -1074,8 +1100,10 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) dev_err(dev, "SAS de-reset fail.\n"); return -EIO; } - } else - dev_warn(dev, "no reset method\n"); + } else { + dev_err(dev, "no reset method\n"); + return -EINVAL; + } return 0; } @@ -1120,9 +1148,16 @@ static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) } } +static const struct signal_attenuation_s x6000 = {9200, 0, 10476}; +static const struct sig_atten_lu_s sig_atten_lu[] = { + { &x6000, 0x3016a68 }, +}; + static void init_reg_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; + u32 sas_phy_ctrl = 0x30b9908; + u32 signal[3]; int i; /* Global registers init */ @@ -1166,9 +1201,43 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + /* Get sas_phy_ctrl value to deal with TX FFE issue. 
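The "hisilicon,signal-attenuation" property carries the board's de-emphasis, preshoot and boost readings; they are matched against the sig_atten_lu[] table and, on a hit, the paired value replaces the 0x30b9908 SAS_PHY_CTRL default, while unknown readings keep the default and log a warning.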
*/ + if (!device_property_read_u32_array(dev, "hisilicon,signal-attenuation", + signal, ARRAY_SIZE(signal))) { + for (i = 0; i < ARRAY_SIZE(sig_atten_lu); i++) { + const struct sig_atten_lu_s *lookup = &sig_atten_lu[i]; + const struct signal_attenuation_s *att = lookup->att; + + if ((signal[0] == att->de_emphasis) && + (signal[1] == att->preshoot) && + (signal[2] == att->boost)) { + sas_phy_ctrl = lookup->sas_phy_ctrl; + break; + } + } + + if (i == ARRAY_SIZE(sig_atten_lu)) + dev_warn(dev, "unknown signal attenuation values, using default PHY ctrl config\n"); + } + for (i = 0; i < hisi_hba->n_phy; i++) { - hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); - hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908); + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 prog_phy_link_rate = 0x800; + + if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < + SAS_LINK_RATE_1_5_GBPS)) { + prog_phy_link_rate = 0x855; + } else { + enum sas_linkrate max = sas_phy->phy->maximum_linkrate; + + prog_phy_link_rate = + hisi_sas_get_prog_phy_linkrate_mask(max) | + 0x800; + } + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, + prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl); hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); @@ -1177,8 +1246,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); - hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); - hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe); hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); @@ -1537,39 +1606,12 @@ static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { - u32 prog_phy_link_rate = - hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - int i; - enum sas_linkrate min, max; - u32 rate_mask = 0; - - if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = sas_phy->phy->maximum_linkrate; - min = r->minimum_linkrate; - } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = r->maximum_linkrate; - min = sas_phy->phy->minimum_linkrate; - } else - return; - - sas_phy->phy->maximum_linkrate = max; - sas_phy->phy->minimum_linkrate = min; - - min -= SAS_LINK_RATE_1_5_GBPS; - max -= SAS_LINK_RATE_1_5_GBPS; - - for (i = 0; i <= max; i++) - rate_mask |= 1 << (i * 2); - - prog_phy_link_rate &= ~0xff; - prog_phy_link_rate |= rate_mask; + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = 0x800; + prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, - prog_phy_link_rate); - - phy_hard_reset_v2_hw(hisi_hba, phy_no); + prog_phy_link_rate); } static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) @@ -1610,42 +1652,50 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, 
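/*
 * get_free_slot_v*_hw now returns the write index itself (or -EAGAIN
 * when the queue is full) and pre-advances dq->wr_point under
 * dq->lock; the doorbell write moves to start_delivery_v*_hw, which
 * batches all ready slots on dq->list into one DLVRY_Q_0_WR_PTR
 * update.
 */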
struct hisi_sas_dq *dq) r = hisi_sas_read32_relaxed(hisi_hba, DLVRY_Q_0_RD_PTR + (queue * 0x14)); if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n\n", + dev_warn(dev, "full queue=%d r=%d w=%d\n", queue, r, w); return -EAGAIN; } - return 0; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; + + return w; } +/* DQ lock must be taken here */ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; - int dlvry_queue = dq->slot_prep->dlvry_queue; - int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; + struct hisi_sas_slot *s, *s1; + struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp, count = 0; + + dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + count++; + wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + list_del(&s->delivery); + } - dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; - hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), - dq->wr_point); + if (!count) + return; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } -static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, +static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); - struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; - if (n_elem > HISI_SAS_SGE_PAGE_CNT) { - dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", - n_elem); - return -EINVAL; - } - for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; @@ -1658,47 +1708,24 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); - - return 0; } -static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, +static void prep_smp_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; - struct device *dev = hisi_hba->dev; struct hisi_sas_port *port = slot->port; - struct scatterlist *sg_req, *sg_resp; + struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; - unsigned int req_len, resp_len; - int elem, rc; + unsigned int req_len; - /* - * DMA-map SMP request, response buffers - */ /* req */ sg_req = &task->smp_task.smp_req; - elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); - if (!elem) - return -ENOMEM; - req_len = sg_dma_len(sg_req); req_dma_addr = sg_dma_address(sg_req); - - /* resp */ - sg_resp = &task->smp_task.smp_resp; - elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out_req; - } - resp_len = sg_dma_len(sg_resp); - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; - goto err_out_resp; - } + req_len = sg_dma_len(&task->smp_task.smp_req); /* create header */ /* dw0 */ @@ -1720,21 +1747,10 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - - return 0; - -err_out_resp: - dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); -err_out_req: - dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, - DMA_TO_DEVICE); - return rc; } -static int 
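get_free_slot_v2_hw() now returns the delivery-queue write index (advancing dq->wr_point itself) instead of 0, and start_delivery_v2_hw() batches every slot on dq->list that has been marked ready. The caller side lives in the common code, so the names below are assumptions, but the pattern these hunks imply is roughly:

	/* Sketch of the assumed dispatch path; everything under dq->lock,
	 * as the comment on start_delivery_v2_hw() requires. */
	spin_lock_irqsave(&dq->lock, flags);
	wr_q = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q < 0) {
		/* queue full: -EAGAIN */
		spin_unlock_irqrestore(&dq->lock, flags);
		return wr_q;
	}
	slot->dlvry_queue = dq->id;
	slot->dlvry_queue_slot = wr_q;
	list_add_tail(&slot->delivery, &dq->list);
	slot->ready = 1;		/* visible to start_delivery */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);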
prep_ssp_v2_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int is_tmf, - struct hisi_sas_tmf_task *tmf) +static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; @@ -1743,7 +1759,8 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; - int has_data = 0, rc, priority = is_tmf; + struct hisi_sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; u8 *buf_cmd; u32 dw1 = 0, dw2 = 0; @@ -1754,7 +1771,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VDTL_OFF; - if (is_tmf) { + if (tmf) { dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { @@ -1785,12 +1802,9 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) { - rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + if (has_data) + prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); - if (rc) - return rc; - } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -1800,7 +1814,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, sizeof(struct ssp_frame_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); - if (!is_tmf) { + if (!tmf) { buf_cmd[9] = task->ssp_task.task_attr | (task->ssp_task.task_prio << 3); memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, @@ -1819,8 +1833,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, break; } } - - return 0; } #define TRANS_TX_ERR 0 @@ -2332,23 +2344,24 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; + struct sas_ha_struct *ha; enum exec_status sts; struct hisi_sas_complete_v2_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v2_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; unsigned long flags; - int aborted; + bool is_internal = slot->is_internal; if (unlikely(!task || !task->lldd_task || !task->dev)) return -EINVAL; ts = &task->task_status; device = task->dev; + ha = device->port->ha; sas_dev = device->lldd_dev; spin_lock_irqsave(&task->task_state_lock, flags); - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); spin_unlock_irqrestore(&task->task_state_lock, flags); @@ -2356,14 +2369,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; - if (unlikely(aborted)) { - ts->stat = SAS_ABORTED_TASK; - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_task_free(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - return -1; - } - if (unlikely(!sas_dev)) { dev_dbg(dev, "slot complete: port has no device\n"); ts->stat = SAS_PHY_DOWN; @@ -2401,6 +2406,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK) >> CMPLT_HDR_ERR_PHASE_OFF; + u32 *error_info = hisi_sas_status_buf_addr_mem(slot); /* Analyse error happens on which phase TX or RX */ if (ERR_ON_TX_PHASE(err_phase)) @@ 
-2408,6 +2414,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) else if (ERR_ON_RX_PHASE(err_phase)) slot_err_v2_hw(hisi_hba, task, slot, 2); + if (ts->stat != SAS_DATA_UNDERRUN) + dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d " + "CQ hdr: 0x%x 0x%x 0x%x 0x%x " + "Error info: 0x%x 0x%x 0x%x 0x%x\n", + slot->idx, task, sas_dev->device_id, + complete_hdr->dw0, complete_hdr->dw1, + complete_hdr->act, complete_hdr->dw3, + error_info[0], error_info[1], + error_info[2], error_info[3]); + if (unlikely(slot->abort)) return ts->stat; goto out; @@ -2457,19 +2473,33 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } if (!slot->port->port_attached) { - dev_err(dev, "slot complete: port %d has removed\n", + dev_warn(dev, "slot complete: port %d has removed\n", slot->port->sas_port.id); ts->stat = SAS_PHY_DOWN; } out: + hisi_sas_slot_task_free(hisi_hba, task, slot); + sts = ts->stat; spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + dev_info(dev, "slot complete: task(%p) aborted\n", task); + return SAS_ABORTED_TASK; + } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_task_free(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - sts = ts->stat; + + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { + spin_lock_irqsave(&device->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &ha->state)) { + spin_unlock_irqrestore(&device->done_lock, flags); + dev_info(dev, "slot complete: task(%p) ignored\n ", + task); + return sts; + } + spin_unlock_irqrestore(&device->done_lock, flags); + } if (task->task_done) task->task_done(task); @@ -2477,7 +2507,7 @@ out: return sts; } -static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, +static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; @@ -2487,8 +2517,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct hisi_sas_tmf_task *tmf = slot->tmf; u8 *buf_cmd; - int has_data = 0, rc = 0, hdr_tag = 0; + int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; /* create header */ @@ -2499,6 +2530,12 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, else hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); + if (tmf && tmf->force_phy) { + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; + hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id) + << CMD_HDR_PHY_ID_OFF); + } + /* dw1 */ switch (task->data_dir) { case DMA_TO_DEVICE: @@ -2518,7 +2555,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, dw1 |= 1 << CMD_HDR_RESET_OFF; dw1 |= (hisi_sas_get_ata_protocol( - task->ata_task.fis.command, task->data_dir)) + &task->ata_task.fis, task->data_dir)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); @@ -2536,12 +2573,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, /* dw3 */ hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) { - rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + if (has_data) + prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); - if (rc) - return rc; - } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr 
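The reworked completion path frees the slot first, returns SAS_ABORTED_TASK while still holding task_state_lock if the task was aborted, and then drops non-internal, non-SMP completions while the HA is frozen. The freeze itself happens elsewhere, assumed to be in libsas or the common reset code while the host is being suspended or reset; a loudly hedged sketch of that counterpart:

	/* Assumed freezer side (not shown in this patch): while
	 * SAS_HA_FROZEN is set, the check above makes the LLDD drop
	 * external completions instead of calling ->task_done(). */
	set_bit(SAS_HA_FROZEN, &ha->state);
	/* ... suspend or reset the host, fail over outstanding tasks ... */
	clear_bit(SAS_HA_FROZEN, &ha->state);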
= cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -2553,8 +2587,6 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - - return 0; } static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) @@ -2591,7 +2623,7 @@ static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) } } -static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, +static void prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort) { @@ -2610,7 +2642,7 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, /* dw0 */ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ (port->id << CMD_HDR_PORT_OFF) | - ((dev_is_sata(dev) ? 1:0) << + (dev_is_sata(dev) << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | (abort_flag << CMD_HDR_ABORT_FLAG_OFF)); @@ -2620,19 +2652,18 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, /* dw7 */ hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); hdr->transfer_tags = cpu_to_le32(slot->idx); - - return 0; } static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = IRQ_HANDLED; - u32 port_id, link_rate, hard_phy_linkrate; + u32 port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; + unsigned long flags; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); @@ -2666,11 +2697,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) } sas_phy->linkrate = link_rate; - hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, - HARD_PHY_LINKRATE); - phy->maximum_linkrate = hard_phy_linkrate & 0xf; - phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; - sas_phy->oob_mode = SAS_OOB_MODE; memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); @@ -2689,7 +2715,13 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) if (!timer_pending(&hisi_hba->timer)) set_link_timer_quirk(hisi_hba); } - queue_work(hisi_hba->wq, &phy->phyup_ws); + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + spin_lock_irqsave(&phy->lock, flags); + if (phy->reset_completion) { + phy->in_reset = 0; + complete(phy->reset_completion); + } + spin_unlock_irqrestore(&phy->lock, flags); end: hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, @@ -2715,10 +2747,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) u32 phy_state, sl_ctrl, txid_auto; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct hisi_sas_port *port = phy->port; + struct device *dev = hisi_hba->dev; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 
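phy_up_v2_hw() now signals an optional completion under phy->lock so a synchronous phy-reset path can wait for the link to come back instead of polling. A sketch of the waiting side, assuming it sits in the common phy-reset code (the 2-second timeout is illustrative, not taken from this patch):

	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	phy->reset_completion = &completion;
	phy->in_reset = 1;
	spin_unlock_irqrestore(&phy->lock, flags);

	/* ... issue the hard reset / link reset on this phy ... */

	wait_for_completion_timeout(&completion, msecs_to_jiffies(2000));
	phy->reset_completion = NULL;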
1 : 0); sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); @@ -2815,6 +2849,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); } +static const struct hisi_sas_hw_error port_ecc_axi_error[] = { + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF), + .msg = "dmac_tx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF), + .msg = "dmac_rx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), + .msg = "dma_tx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF), + .msg = "dma_tx_axi_rd_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF), + .msg = "dma_rx_axi_wr_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), + .msg = "dma_rx_axi_rd_err", + }, +}; + static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; @@ -2831,40 +2892,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; while (irq_msk) { - if (irq_msk & (1 << phy_no)) { - u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT0); - u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT1); - u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT2); - - if (irq_value1) { - if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | - CHL_INT1_DMAC_TX_ECC_ERR_MSK)) - panic("%s: DMAC RX/TX ecc bad error!\ - (0x%x)", - dev_name(dev), irq_value1); - - hisi_sas_phy_write32(hisi_hba, phy_no, - CHL_INT1, irq_value1); - } + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT1); + u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT2); + + if ((irq_msk & (1 << phy_no)) && irq_value1) { + int i; + + for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) { + const struct hisi_sas_hw_error *error = + &port_ecc_axi_error[i]; + + if (!(irq_value1 & error->irq_msk)) + continue; - if (irq_value2) - hisi_sas_phy_write32(hisi_hba, phy_no, - CHL_INT2, irq_value2); + dev_warn(dev, "%s error (phy%d 0x%x) found!\n", + error->msg, phy_no, irq_value1); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT1, irq_value1); + } - if (irq_value0) { - if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) - phy_bcast_v2_hw(phy_no, hisi_hba); + if ((irq_msk & (1 << phy_no)) && irq_value2) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - hisi_sas_phy_write32(hisi_hba, phy_no, - CHL_INT0, irq_value0 - & (~CHL_INT0_HOTPLUG_TOUT_MSK) - & (~CHL_INT0_SL_PHY_ENABLE_MSK) - & (~CHL_INT0_NOT_RDY_MSK)); + if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { + dev_warn(dev, "phy%d identify timeout\n", + phy_no); + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); } + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT2, irq_value2); + } + + if ((irq_msk & (1 << phy_no)) && irq_value0) { + if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) + phy_bcast_v2_hw(phy_no, hisi_hba); + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT0, irq_value0 + & (~CHL_INT0_HOTPLUG_TOUT_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); } irq_msk &= ~(1 << phy_no); phy_no++; @@ -2908,7 +2984,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, val = hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; - dev_warn(dev, ecc_error->msg, irq_value, val); + dev_err(dev, ecc_error->msg, irq_value, val); 
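Where the old CHL_INT1 handler panicked on DMAC ECC errors, the new table-driven loop over port_ecc_axi_error[] logs the specific failure and schedules hisi_hba->rst_work instead. The worker behind those queue_work() calls is in the common driver; its assumed shape (handler name and reset entry point are assumptions, not shown in this diff):

	static void hisi_sas_rst_work_handler(struct work_struct *work)
	{
		struct hisi_hba *hisi_hba =
			container_of(work, struct hisi_hba, rst_work);

		hisi_sas_controller_reset(hisi_hba);
	}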
queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } @@ -3017,12 +3093,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) for (; sub->msk || sub->msg; sub++) { if (!(err_value & sub->msk)) continue; - dev_warn(dev, "%s (0x%x) found!\n", + dev_err(dev, "%s (0x%x) found!\n", sub->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } else { - dev_warn(dev, "%s (0x%x) found!\n", + dev_err(dev, "%s (0x%x) found!\n", axi_error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } @@ -3053,14 +3129,12 @@ static void cq_tasklet_v2_hw(unsigned long val) struct hisi_sas_complete_v2_hdr *complete_queue; u32 rd_point = cq->rd_point, wr_point, dev_id; int queue = cq->id; - struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; if (unlikely(hisi_hba->reject_stp_links_msk)) phys_try_accept_stp_links_v2_hw(hisi_hba); complete_queue = hisi_hba->complete_hdr[queue]; - spin_lock(&dq->lock); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); @@ -3110,7 +3184,6 @@ static void cq_tasklet_v2_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); - spin_unlock(&dq->lock); } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) @@ -3137,6 +3210,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; irqreturn_t res = IRQ_HANDLED; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + unsigned long flags; int phy_no, offset; phy_no = sas_phy->id; @@ -3197,6 +3271,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) sas_phy->oob_mode = SATA_OOB_MODE; /* Make up some unique SAS address */ attached_sas_addr[0] = 0x50; + attached_sas_addr[6] = hisi_hba->shost->host_no; attached_sas_addr[7] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); @@ -3208,8 +3283,14 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) phy->identify.device_type = SAS_SATA_DEV; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - queue_work(hisi_hba->wq, &phy->phyup_ws); + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + spin_lock_irqsave(&phy->lock, flags); + if (phy->reset_completion) { + phy->in_reset = 0; + complete(phy->reset_completion); + } + spin_unlock_irqrestore(&phy->lock, flags); end: hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); @@ -3394,7 +3475,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) udelay(10); if (cnt++ > 10) { - dev_info(dev, "wait axi bus state to idle timeout!\n"); + dev_err(dev, "wait axi bus state to idle timeout!\n"); return -1; } } @@ -3448,6 +3529,46 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } +static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) +{ + struct device *dev = hisi_hba->dev; + int entries, entries_old = 0, time; + + for (time = 0; time < timeout_ms; time += delay_ms) { + entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); + if (entries == entries_old) + break; + + entries_old = entries; + msleep(delay_ms); + } + + dev_dbg(dev, "wait commands complete %dms\n", time); +} + +static struct scsi_host_template sht_v2_hw = { + .name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure 
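wait_cmds_complete_timeout_v2_hw() polls CQE_SEND_CNT and exits as soon as two consecutive samples match, i.e. once the controller has stopped producing completions, rather than always sleeping for the full timeout. A plausible call site in the quiesce-before-reset path (call site and parameters are assumptions):

	/* Assumed use from the common reset prologue: sample every 100 ms,
	 * give up after 5 s of continued completion activity. */
	if (hisi_hba->hw->wait_cmds_complete_timeout)
		hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);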
= hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, + .shost_attrs = host_attrs, +}; + static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, .setup_itct = setup_itct_v2_hw, @@ -3455,6 +3576,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .alloc_dev = alloc_dev_quirk_v2_hw, .sl_notify = sl_notify_v2_hw, .get_wideport_bitmap = get_wideport_bitmap_v2_hw, + .clear_itct = clear_itct_v2_hw, .free_device = free_device_v2_hw, .prep_smp = prep_smp_v2_hw, .prep_ssp = prep_ssp_v2_hw, @@ -3475,6 +3597,8 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .soft_reset = soft_reset_v2_hw, .get_phys_state = get_phys_state_v2_hw, .write_gpio = write_gpio_v2_hw, + .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw, + .sht = &sht_v2_hw, }; static int hisi_sas_v2_probe(struct platform_device *pdev) @@ -3499,9 +3623,6 @@ static int hisi_sas_v2_remove(struct platform_device *pdev) struct sas_ha_struct *sha = platform_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; - if (timer_pending(&hisi_hba->timer)) - del_timer(&hisi_hba->timer); - hisi_sas_kill_tasklets(hisi_hba); return hisi_sas_remove(pdev); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index d1bf5af26ccb..b2f6cf780ef1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -92,6 +92,7 @@ #define SAS_ECC_INTR 0x1e8 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 +#define CQE_SEND_CNT 0x248 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 #define DLVRY_Q_0_DEPTH 0x268 @@ -106,6 +107,11 @@ #define COMPL_Q_0_RD_PTR 0x4f0 #define AWQOS_AWCACHE_CFG 0xc84 #define ARQOS_ARCACHE_CFG 0xc88 +#define HILINK_ERR_DFX 0xe04 +#define SAS_GPIO_CFG_0 0x1000 +#define SAS_GPIO_CFG_1 0x1004 +#define SAS_GPIO_TX_0_1 0x1040 +#define SAS_CFG_DRIVE_VLD 0x1070 /* phy registers requiring init */ #define PORT_BASE (0x2000) @@ -140,6 +146,7 @@ #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define STP_LINK_TIMER (PORT_BASE + 0x120) +#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124) #define CON_CFG_DRIVER (PORT_BASE + 0x130) #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) @@ -165,10 +172,14 @@ #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 #define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 +#define CHL_INT2_RX_INVLD_DW_OFF 30 +#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) @@ -181,6 +192,8 @@ #define DMA_RX_STATUS (PORT_BASE + 0x2e8) #define DMA_RX_STATUS_BUSY_OFF 0 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define 
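With sht_v2_hw, each hardware version now supplies its own scsi_host_template through the new hw->sht field (can_queue starts at 1 and is presumably raised once the real queue depth is known); the v3 PCI probe later in this diff uses &sht_v3_hw directly. A hedged sketch of the assumed consumer on the shared platform path:

	/* Assumed consumer in the shared probe code (hisi_sas_main.c),
	 * where the hw descriptor is available before hisi_hba exists: */
	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));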
COARSETUNE_TIME (PORT_BASE + 0x304) #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) @@ -204,6 +217,16 @@ #define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8 #define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF) +/* RAS registers need init */ +#define RAS_BASE (0x6000) +#define SAS_RAS_INTR0 (RAS_BASE) +#define SAS_RAS_INTR1 (RAS_BASE + 0x04) +#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) +#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) +#define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c) +#define SAS_RAS_INTR2 (RAS_BASE + 0x20) +#define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24) + /* HW dma structures */ /* Delivery queue header */ /* dw0 */ @@ -330,21 +353,16 @@ struct hisi_sas_err_record_v3 { #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 #define HISI_SAS_MSI_COUNT_V3_HW 32 -enum { - HISI_SAS_PHY_PHY_UPDOWN, - HISI_SAS_PHY_CHNL_INT, - HISI_SAS_PHY_INT_NR -}; - #define DIR_NO_DATA 0 #define DIR_TO_INI 1 #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 -#define CMD_IS_UNCONSTRAINT(cmd) \ - ((cmd == ATA_CMD_READ_LOG_EXT) || \ - (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \ - (cmd == ATA_CMD_DEV_RESET)) +#define FIS_CMD_IS_UNCONSTRAINED(fis) \ + ((fis.command == ATA_CMD_READ_LOG_EXT) || \ + (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ + ((fis.command == ATA_CMD_DEV_RESET) && \ + ((fis.control & ATA_SRST) != 0))) static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { @@ -383,8 +401,23 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, return readl(regs); } +#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \ + timeout_us) \ +({ \ + void __iomem *regs = hisi_hba->regs + off; \ + readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \ +}) + +#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \ + timeout_us) \ +({ \ + void __iomem *regs = hisi_hba->regs + off; \ + readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\ +}) + static void init_reg_v3_hw(struct hisi_hba *hisi_hba) { + struct pci_dev *pdev = hisi_hba->pci_dev; int i; /* Global registers init */ @@ -402,7 +435,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff); + if (pdev->revision >= 0x21) + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff); + else + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff); hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); @@ -413,32 +449,48 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); - hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000); for (i = 0; i < hisi_hba->n_phy; i++) { - hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801); + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 prog_phy_link_rate = 0x800; + + if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < + SAS_LINK_RATE_1_5_GBPS)) { + prog_phy_link_rate = 0x855; + } else { + enum sas_linkrate max = sas_phy->phy->maximum_linkrate; + + prog_phy_link_rate = + hisi_sas_get_prog_phy_linkrate_mask(max) | + 0x800; + } + hisi_sas_phy_write32(hisi_hba, i, 
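The two new wrappers resolve a register offset against hisi_hba->regs and defer to readl_poll_timeout()/readl_poll_timeout_atomic(), so callers stop repeating the base-address arithmetic. For example, the AXI-idle wait that reset_hw_v3_hw() performs below becomes:

	u32 val;
	int ret;

	/* Sample AXI_CFG every 20 ms until it reads zero,
	 * giving up after 1 s. */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret)
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);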
PROG_PHY_LINK_RATE, + prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); - hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff); - hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); + if (pdev->revision >= 0x21) + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, + 0xffffffff); + else + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, + 0xff87ffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); - hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); - hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa); - hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, - 0xa03e8); - hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG, - 0xa03e8); - hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, - 0x7f7a120); - hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, - 0x2a0a80); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + + /* used for 12G negotiate */ + hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); } + for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ hisi_sas_write32(hisi_hba, @@ -496,6 +548,20 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, upper_32_bits(hisi_hba->initial_fis_dma)); + + /* RAS registers init */ + hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); + hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); + hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0); + hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0); + + /* LED registers init */ + hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff); + hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080); + hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080); + /* Configure blink generator rate A to 1Hz and B to 4Hz */ + hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700); + hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000); } static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -588,7 +654,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void free_device_v3_hw(struct hisi_hba *hisi_hba, +static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); @@ -647,8 +713,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) udelay(50); /* Ensure axi bus idle */ - ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val, - 20000, 1000000); + ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, + 20000, 1000000); if (ret) { dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); return -EIO; @@ -662,8 +728,10 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) dev_err(dev, "Reset failed\n"); return -EIO; } - } else + } else { dev_err(dev, "no reset method!\n"); + return -EINVAL; + } return 0; } @@ -723,7 +791,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, 
int phy_no) start_phy_v3_hw(hisi_hba, phy_no); } -enum sas_linkrate phy_get_max_linkrate_v3_hw(void) +static enum sas_linkrate phy_get_max_linkrate_v3_hw(void) { return SAS_LINK_RATE_12_0_GBPS; } @@ -785,42 +853,49 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) r = hisi_sas_read32_relaxed(hisi_hba, DLVRY_Q_0_RD_PTR + (queue * 0x14)); if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n\n", + dev_warn(dev, "full queue=%d r=%d w=%d\n", queue, r, w); return -EAGAIN; } - return 0; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; + + return w; } static void start_delivery_v3_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; - int dlvry_queue = dq->slot_prep->dlvry_queue; - int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; + struct hisi_sas_slot *s, *s1; + struct list_head *dq_list; + int dlvry_queue = dq->id; + int wp, count = 0; + + dq_list = &dq->list; + list_for_each_entry_safe(s, s1, &dq->list, delivery) { + if (!s->ready) + break; + count++; + wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; + list_del(&s->delivery); + } - dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; - hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), - dq->wr_point); + if (!count) + return; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } -static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, +static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); - struct device *dev = hisi_hba->dev; struct scatterlist *sg; int i; - if (n_elem > HISI_SAS_SGE_PAGE_CNT) { - dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", - n_elem); - return -EINVAL; - } - for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; @@ -833,13 +908,10 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); - - return 0; } -static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int is_tmf, - struct hisi_sas_tmf_task *tmf) +static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; @@ -848,7 +920,8 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; - int has_data = 0, rc, priority = is_tmf; + struct hisi_sas_tmf_task *tmf = slot->tmf; + int has_data = 0, priority = !!tmf; u8 *buf_cmd; u32 dw1 = 0, dw2 = 0; @@ -859,7 +932,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VDTL_OFF; - if (is_tmf) { + if (tmf) { dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { @@ -889,12 +962,9 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) { - rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + if (has_data) + prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); - if (rc) - return rc; - } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = 
cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -904,7 +974,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, sizeof(struct ssp_frame_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); - if (!is_tmf) { + if (!tmf) { buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3); memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); } else { @@ -921,48 +991,25 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba, break; } } - - return 0; } -static int prep_smp_v3_hw(struct hisi_hba *hisi_hba, +static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; - struct device *dev = hisi_hba->dev; struct hisi_sas_port *port = slot->port; - struct scatterlist *sg_req, *sg_resp; + struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; - unsigned int req_len, resp_len; - int elem, rc; + unsigned int req_len; - /* - * DMA-map SMP request, response buffers - */ /* req */ sg_req = &task->smp_task.smp_req; - elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); - if (!elem) - return -ENOMEM; req_len = sg_dma_len(sg_req); req_dma_addr = sg_dma_address(sg_req); - /* resp */ - sg_resp = &task->smp_task.smp_resp; - elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out_req; - } - resp_len = sg_dma_len(sg_resp); - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; - goto err_out_resp; - } - /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | @@ -984,18 +1031,9 @@ static int prep_smp_v3_hw(struct hisi_hba *hisi_hba, hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); - return 0; - -err_out_resp: - dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); -err_out_req: - dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, - DMA_TO_DEVICE); - return rc; } -static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, +static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; @@ -1006,7 +1044,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u8 *buf_cmd; - int has_data = 0, rc = 0, hdr_tag = 0; + int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); @@ -1033,11 +1071,11 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, dw1 |= 1 << CMD_HDR_RESET_OFF; dw1 |= (hisi_sas_get_ata_protocol( - task->ata_task.fis.command, task->data_dir)) + &task->ata_task.fis, task->data_dir)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; - if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) + if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; hdr->dw1 = cpu_to_le32(dw1); @@ -1055,12 +1093,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, /* dw3 */ hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) { - rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, + if (has_data) + prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); - if (rc) - return rc; - } hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -1072,11 +1107,9 @@ static int prep_ata_v3_hw(struct hisi_hba 
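Both v2 and v3 now hand the whole FIS to hisi_sas_get_ata_protocol() instead of just the command byte, and the renamed FIS_CMD_IS_UNCONSTRAINED() macro uses the extra context: ATA_CMD_DEV_RESET only counts as unconstrained when the FIS actually carries SRST in its control field. The assumed common-code prototype this implies:

	/* hisi_sas.h (assumed): the protocol helper needs fis->control as
	 * well as fis->command to classify a DEV_RESET correctly. */
	u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
				     int direction);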
*hisi_hba, task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - - return 0; } -static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, +static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort) { @@ -1088,7 +1121,7 @@ static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, /* dw0 */ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ (port->id << CMD_HDR_PORT_OFF) | - ((dev_is_sata(dev) ? 1:0) + (dev_is_sata(dev) << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | (abort_flag << CMD_HDR_ABORT_FLAG_OFF)); @@ -1101,16 +1134,16 @@ static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); hdr->transfer_tags = cpu_to_le32(slot->idx); - return 0; } -static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { - int i, res = 0; - u32 context, port_id, link_rate, hard_phy_linkrate; + int i, res; + u32 context, port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; + unsigned long flags; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); @@ -1125,10 +1158,6 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) goto end; } sas_phy->linkrate = link_rate; - hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, - HARD_PHY_LINKRATE); - phy->maximum_linkrate = hard_phy_linkrate & 0xf; - phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); /* Check for SATA dev */ @@ -1138,7 +1167,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) struct dev_to_host_fis *fis; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; - dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; sas_phy->oob_mode = SATA_OOB_MODE; @@ -1181,8 +1210,14 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) phy->port_id = port_id; phy->phy_attached = 1; - queue_work(hisi_hba->wq, &phy->phyup_ws); - + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); + res = IRQ_HANDLED; + spin_lock_irqsave(&phy->lock, flags); + if (phy->reset_completion) { + phy->in_reset = 0; + complete(phy->reset_completion); + } + spin_unlock_irqrestore(&phy->lock, flags); end: hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_PHY_ENABLE_MSK); @@ -1191,7 +1226,7 @@ end: return res; } -static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { u32 phy_state, sl_ctrl, txid_auto; struct device *dev = hisi_hba->dev; @@ -1213,10 +1248,10 @@ static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); - return 0; + return IRQ_HANDLED; } -static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) +static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -1227,6 +1262,8 @@ static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, 
phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); + + return IRQ_HANDLED; } static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) @@ -1253,7 +1290,9 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) res = IRQ_HANDLED; if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK) /* phy bcast */ - phy_bcast_v3_hw(phy_no, hisi_hba); + if (phy_bcast_v3_hw(phy_no, hisi_hba) + == IRQ_HANDLED) + res = IRQ_HANDLED; } else { if (irq_value & CHL_INT0_NOT_RDY_MSK) /* phy down */ @@ -1292,14 +1331,10 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; - u32 ent_msk, ent_tmp, irq_msk; + struct pci_dev *pci_dev = hisi_hba->pci_dev; + u32 irq_msk; int phy_no = 0; - ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); - ent_tmp = ent_msk; - ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); - irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & 0xeeeeeeee; @@ -1310,6 +1345,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) CHL_INT1); u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT1_MSK); + u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT2_MSK); + + irq_value1 &= ~irq_msk1; + irq_value2 &= ~irq_msk2; if ((irq_msk & (4 << (phy_no * 4))) && irq_value1) { @@ -1322,7 +1364,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) if (!(irq_value1 & error->irq_msk)) continue; - dev_warn(dev, "%s error (phy%d 0x%x) found!\n", + dev_err(dev, "%s error (phy%d 0x%x) found!\n", error->msg, phy_no, irq_value1); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } @@ -1331,10 +1373,52 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) CHL_INT1, irq_value1); } - if (irq_msk & (8 << (phy_no * 4)) && irq_value2) + if (irq_msk & (8 << (phy_no * 4)) && irq_value2) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + + if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { + dev_warn(dev, "phy%d identify timeout\n", + phy_no); + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); + + } + + if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { + u32 reg_value = hisi_sas_phy_read32(hisi_hba, + phy_no, STP_LINK_TIMEOUT_STATE); + + dev_warn(dev, "phy%d stp link timeout (0x%x)\n", + phy_no, reg_value); + if (reg_value & BIT(4)) + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); + } + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value2); + if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && + (pci_dev->revision == 0x20)) { + u32 reg_value; + int rc; + + rc = hisi_sas_read32_poll_timeout_atomic( + HILINK_ERR_DFX, reg_value, + !((reg_value >> 8) & BIT(phy_no)), + 1000, 10000); + if (rc) { + disable_phy_v3_hw(hisi_hba, phy_no); + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT2, + BIT(CHL_INT2_RX_INVLD_DW_OFF)); + hisi_sas_phy_read32(hisi_hba, phy_no, + ERR_CNT_INVLD_DW); + mdelay(1); + enable_phy_v3_hw(hisi_hba, phy_no); + } + } + } if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { hisi_sas_phy_write32(hisi_hba, phy_no, @@ -1347,8 +1431,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) phy_no++; } - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); - return IRQ_HANDLED; } @@ -1417,6 +1499,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); irq_value = hisi_sas_read32(hisi_hba, 
ENT_INT_SRC3); + irq_value &= ~irq_msk; for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; @@ -1432,12 +1515,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) if (!(err_value & sub->msk)) continue; - dev_warn(dev, "%s error (0x%x) found!\n", + dev_err(dev, "%s error (0x%x) found!\n", sub->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } else { - dev_warn(dev, "%s error (0x%x) found!\n", + dev_err(dev, "%s error (0x%x) found!\n", error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } @@ -1518,36 +1601,30 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; + struct sas_ha_struct *ha; enum exec_status sts; struct hisi_sas_complete_v3_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v3_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; - int aborted; unsigned long flags; + bool is_internal = slot->is_internal; if (unlikely(!task || !task->lldd_task || !task->dev)) return -EINVAL; ts = &task->task_status; device = task->dev; + ha = device->port->ha; sas_dev = device->lldd_dev; spin_lock_irqsave(&task->task_state_lock, flags); - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; - if (unlikely(aborted)) { - ts->stat = SAS_ABORTED_TASK; - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_task_free(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - return -1; - } if (unlikely(!sas_dev)) { dev_dbg(dev, "slot complete: port has not device\n"); @@ -1583,7 +1660,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) /* check for erroneous completion */ if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { + u32 *error_info = hisi_sas_status_buf_addr_mem(slot); + slot_err_v3_hw(hisi_hba, task, slot); + if (ts->stat != SAS_DATA_UNDERRUN) + dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d " + "CQ hdr: 0x%x 0x%x 0x%x 0x%x " + "Error info: 0x%x 0x%x 0x%x 0x%x\n", + slot->idx, task, sas_dev->device_id, + complete_hdr->dw0, complete_hdr->dw1, + complete_hdr->act, complete_hdr->dw3, + error_info[0], error_info[1], + error_info[2], error_info[3]); if (unlikely(slot->abort)) return ts->stat; goto out; @@ -1628,19 +1716,33 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } if (!slot->port->port_attached) { - dev_err(dev, "slot complete: port %d has removed\n", + dev_warn(dev, "slot complete: port %d has removed\n", slot->port->sas_port.id); ts->stat = SAS_PHY_DOWN; } out: + hisi_sas_slot_task_free(hisi_hba, task, slot); + sts = ts->stat; spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + dev_info(dev, "slot complete: task(%p) aborted\n", task); + return SAS_ABORTED_TASK; + } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_task_free(hisi_hba, task, slot); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - sts = ts->stat; + + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { + 
spin_lock_irqsave(&device->done_lock, flags); + if (test_bit(SAS_HA_FROZEN, &ha->state)) { + spin_unlock_irqrestore(&device->done_lock, flags); + dev_info(dev, "slot complete: task(%p) ignored\n ", + task); + return sts; + } + spin_unlock_irqrestore(&device->done_lock, flags); + } if (task->task_done) task->task_done(task); @@ -1653,56 +1755,30 @@ static void cq_tasklet_v3_hw(unsigned long val) struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; - struct hisi_sas_itct *itct; struct hisi_sas_complete_v3_hdr *complete_queue; - u32 rd_point = cq->rd_point, wr_point, dev_id; + u32 rd_point = cq->rd_point, wr_point; int queue = cq->id; - struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; complete_queue = hisi_hba->complete_hdr[queue]; - spin_lock(&dq->lock); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); while (rd_point != wr_point) { struct hisi_sas_complete_v3_hdr *complete_hdr; + struct device *dev = hisi_hba->dev; int iptt; complete_hdr = &complete_queue[rd_point]; - /* Check for NCQ completion */ - if (complete_hdr->act) { - u32 act_tmp = complete_hdr->act; - int ncq_tag_count = ffs(act_tmp); - - dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >> - CMPLT_HDR_DEV_ID_OFF; - itct = &hisi_hba->itct[dev_id]; - - /* The NCQ tags are held in the itct header */ - while (ncq_tag_count) { - __le64 *ncq_tag = &itct->qw4_15[0]; - - ncq_tag_count -= 1; - iptt = (ncq_tag[ncq_tag_count / 5] - >> (ncq_tag_count % 5) * 12) & 0xfff; - - slot = &hisi_hba->slot_info[iptt]; - slot->cmplt_queue_slot = rd_point; - slot->cmplt_queue = queue; - slot_complete_v3_hw(hisi_hba, slot); - - act_tmp &= ~(1 << ncq_tag_count); - ncq_tag_count = ffs(act_tmp); - } - } else { - iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; + iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; + if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; slot_complete_v3_hw(hisi_hba, slot); - } + } else + dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt); if (++rd_point >= HISI_SAS_QUEUE_SLOTS) rd_point = 0; @@ -1711,7 +1787,6 @@ static void cq_tasklet_v3_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); - spin_unlock(&dq->lock); } static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) @@ -1824,39 +1899,12 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { - u32 prog_phy_link_rate = - hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - int i; - enum sas_linkrate min, max; - u32 rate_mask = 0; - - if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = sas_phy->phy->maximum_linkrate; - min = r->minimum_linkrate; - } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { - max = r->maximum_linkrate; - min = sas_phy->phy->minimum_linkrate; - } else - return; - - sas_phy->phy->maximum_linkrate = max; - sas_phy->phy->minimum_linkrate = min; - - min -= SAS_LINK_RATE_1_5_GBPS; - max -= SAS_LINK_RATE_1_5_GBPS; - - for (i = 0; i <= max; i++) - rate_mask |= 1 << (i * 2); - - prog_phy_link_rate &= ~0xff; - prog_phy_link_rate |= rate_mask; + enum sas_linkrate max = r->maximum_linkrate; + u32 prog_phy_link_rate = 0x800; + prog_phy_link_rate |= 
hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, - prog_phy_link_rate); - - phy_hard_reset_v3_hw(hisi_hba, phy_no); + prog_phy_link_rate); } static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) @@ -1933,8 +1981,9 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); /* wait until bus idle */ - rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE + - AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); + rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + + AM_CURR_TRANS_RETURN, status, + status == 0x3, 10, 100); if (rc) { dev_err(dev, "axi bus is not idle, rc = %d\n", rc); return rc; @@ -1945,13 +1994,82 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) return hw_init_v3_hw(hisi_hba); } +static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data) +{ + struct device *dev = hisi_hba->dev; + u32 *data = (u32 *)write_data; + int i; + + switch (reg_type) { + case SAS_GPIO_REG_TX: + if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) { + dev_err(dev, "write gpio: invalid reg range[%d, %d]\n", + reg_index, reg_index + reg_count - 1); + return -EINVAL; + } + + for (i = 0; i < reg_count; i++) + hisi_sas_write32(hisi_hba, + SAS_GPIO_TX_0_1 + (reg_index + i) * 4, + data[i]); + break; + default: + dev_err(dev, "write gpio: unsupported or bad reg type %d\n", + reg_type); + return -EINVAL; + } + + return 0; +} + +static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) +{ + struct device *dev = hisi_hba->dev; + int entries, entries_old = 0, time; + + for (time = 0; time < timeout_ms; time += delay_ms) { + entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); + if (entries == entries_old) + break; + + entries_old = entries; + msleep(delay_ms); + } + + dev_dbg(dev, "wait commands complete %dms\n", time); +} + +static struct scsi_host_template sht_v3_hw = { + .name = DRV_NAME, + .module = THIS_MODULE, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, + .shost_attrs = host_attrs, +}; + static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, .setup_itct = setup_itct_v3_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, .get_wideport_bitmap = get_wideport_bitmap_v3_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), - .free_device = free_device_v3_hw, + .clear_itct = clear_itct_v3_hw, .sl_notify = sl_notify_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, @@ -1970,6 +2088,8 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .soft_reset = soft_reset_v3_hw, .get_phys_state = get_phys_state_v3_hw, .get_events = phy_get_events_v3_hw, + .write_gpio = write_gpio_v3_hw, + .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, }; static struct Scsi_Host * @@ -1979,7 +2099,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) struct hisi_hba 
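write_gpio_v3_hw() exposes the SGPIO TX bank: each 32-bit SAS_GPIO_TX register carries the LED pattern for four phys, hence the (hisi_hba->n_phy + 3) / 4 bound on reg_index + reg_count. An illustrative call (the 0x80 per-phy pattern is borrowed from the LED init above; actual SGPIO encodings depend on the board):

	/* Drive the first four phys' LED pattern via TX register 0. */
	u8 tx_data[4] = { 0x80, 0x80, 0x80, 0x80 };

	write_gpio_v3_hw(hisi_hba, SAS_GPIO_REG_TX, 0, 1, tx_data);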
*hisi_hba; struct device *dev = &pdev->dev; - shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); + shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); if (!shost) { dev_err(dev, "shost alloc failed\n"); return NULL; @@ -2093,8 +2213,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sha->sas_port[i] = &hisi_hba->port[i].sas_port; } - hisi_sas_init_add(hisi_hba); - rc = scsi_add_host(shost, dev); if (rc) goto err_out_ha; @@ -2146,6 +2264,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->core.shost; + if (timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); + sas_unregister_ha(sha); sas_remove_host(sha->core.shost); @@ -2157,21 +2278,276 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) scsi_host_put(shost); } +static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = { + { .irq_msk = BIT(19), .msg = "HILINK_INT" }, + { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" }, + { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" }, + { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" }, + { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" }, + { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" }, + { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" }, + { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" }, + { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" }, + { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" }, + { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" }, + { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" }, + { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" }, +}; + +static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = { + { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" }, + { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" }, + { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" }, + { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" }, + { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" }, + { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" }, + { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" }, + { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" }, + { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" }, + { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" }, + { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" }, + { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" }, + { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" }, + { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" }, + { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" }, + { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" }, + { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" }, + { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" }, + { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" }, + { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" }, + { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" }, + { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" }, + { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" }, +}; + +static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = { + { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" }, + { .irq_msk = 
BIT(1), .msg = "DMAC1_AXI_BUS_ERR" }, + { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" }, + { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" }, + { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" }, + { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" }, + { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" }, + { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" }, + { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" }, + { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" }, + { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" }, + { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" }, + { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" }, +}; + +static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ras_error; + bool need_reset = false; + u32 irq_value; + int i; + + irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0); + for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) { + ras_error = &sas_ras_intr0_nfe[i]; + if (ras_error->irq_msk & irq_value) { + dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n", + ras_error->msg, irq_value); + need_reset = true; + } + } + hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value); + + irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1); + for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) { + ras_error = &sas_ras_intr1_nfe[i]; + if (ras_error->irq_msk & irq_value) { + dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n", + ras_error->msg, irq_value); + need_reset = true; + } + } + hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value); + + irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2); + for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) { + ras_error = &sas_ras_intr2_nfe[i]; + if (ras_error->irq_msk & irq_value) { + dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n", + ras_error->msg, irq_value); + need_reset = true; + } + } + hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value); + + return need_reset; +} + +static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct device *dev = hisi_hba->dev; + + dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (process_non_fatal_error_v3_hw(hisi_hba)) + return PCI_ERS_RESULT_NEED_RESET; + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev) +{ + return PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct device *dev = hisi_hba->dev; + HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); + + dev_info(dev, "PCI error: slot reset callback!!\n"); + queue_work(hisi_hba->wq, &r.work); + wait_for_completion(r.completion); + if (r.done) + return PCI_ERS_RESULT_RECOVERED; + + return PCI_ERS_RESULT_DISCONNECT; +} + enum { /* instances of the controller */ hip08, }; +static int 
hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct device *dev = hisi_hba->dev; + struct Scsi_Host *shost = hisi_hba->shost; + u32 device_state, status; + int rc; + u32 reg_val; + + if (!pdev->pm_cap) { + dev_err(dev, "PCI PM not supported\n"); + return -ENODEV; + } + + set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + scsi_block_requests(shost); + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + flush_workqueue(hisi_hba->wq); + /* disable DQ/PHY/bus */ + interrupt_disable_v3_hw(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); + hisi_sas_kill_tasklets(hisi_hba); + + hisi_sas_stop_phys(hisi_hba); + + reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val |= 0x1; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); + + /* wait until bus idle */ + rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + + AM_CURR_TRANS_RETURN, status, + status == 0x3, 10, 100); + if (rc) { + dev_err(dev, "axi bus is not idle, rc = %d\n", rc); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); + return rc; + } + + hisi_sas_init_mem(hisi_hba); + + device_state = pci_choose_state(pdev, state); + dev_warn(dev, "entering operating state [D%d]\n", + device_state); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, device_state); + + hisi_sas_release_tasks(hisi_hba); + + sas_suspend_ha(sha); + return 0; +} + +static int hisi_sas_v3_resume(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; + struct device *dev = hisi_hba->dev; + unsigned int rc; + u32 device_state = pdev->current_state; + + dev_warn(dev, "resuming from operating state [D%d]\n", + device_state); + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + rc = pci_enable_device(pdev); + if (rc) + dev_err(dev, "enable device failed during resume (%d)\n", rc); + + pci_set_master(pdev); + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + sas_prep_resume_ha(sha); + init_reg_v3_hw(hisi_hba); + hisi_hba->hw->phys_init(hisi_hba); + sas_resume_ha(sha); + clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + + return 0; +} + static const struct pci_device_id sas_v3_pci_table[] = { { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, {} }; +MODULE_DEVICE_TABLE(pci, sas_v3_pci_table); + +static const struct pci_error_handlers hisi_sas_err_handler = { + .error_detected = hisi_sas_error_detected_v3_hw, + .mmio_enabled = hisi_sas_mmio_enabled_v3_hw, + .slot_reset = hisi_sas_slot_reset_v3_hw, +}; static struct pci_driver sas_v3_pci_driver = { .name = DRV_NAME, .id_table = sas_v3_pci_table, .probe = hisi_sas_v3_probe, .remove = hisi_sas_v3_remove, + .suspend = hisi_sas_v3_suspend, + .resume = hisi_sas_v3_resume, + .err_handler = &hisi_sas_err_handler, }; module_pci_driver(sas_v3_pci_driver); @@ -2179,4 +2555,4 @@ module_pci_driver(sas_v3_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); -MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS("pci:" DRV_NAME); diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index cb6aa802c48e..092a971d066b 100644 
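The hisi_sas v3 changes above close with a suspend path that, like soft_reset_v3_hw(), spins on a register until the AXI master reports idle (status == 0x3), polling every 10us with a 100us budget via hisi_sas_read32_poll_timeout(). A minimal userspace sketch of that poll-until-condition-or-timeout shape follows; fake_read_reg, bus_idle and poll_timeout are hypothetical stand-ins, not driver symbols, and the kernel helper does one final read after the deadline expires, which this sketch omits.

/* Illustrative only: userspace analog of the readl_poll_timeout()
 * pattern used in soft_reset_v3_hw()/hisi_sas_v3_suspend() above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_read_reg(void)
{
	static int calls;
	return ++calls >= 3 ? 0x3 : 0x0;	/* goes "idle" on the 3rd poll */
}

/* Poll until cond(val) holds or timeout_us elapses; mirrors the
 * read32_poll_timeout(reg, status, status == 0x3, 10, 100) call site. */
static int poll_timeout(uint32_t (*read)(void), uint32_t *val,
			int (*cond)(uint32_t), long delay_us, long timeout_us)
{
	struct timespec delay = { 0, delay_us * 1000 };
	long waited = 0;

	for (;;) {
		*val = read();
		if (cond(*val))
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		nanosleep(&delay, NULL);
		waited += delay_us;
	}
}

static int bus_idle(uint32_t v) { return v == 0x3; }

int main(void)
{
	uint32_t status;
	int rc = poll_timeout(fake_read_reg, &status, bus_idle, 10, 100);

	printf("rc=%d status=0x%x\n", rc, status);
	return 0;
}

The driver treats a timeout here as fatal to the suspend attempt: it clears the reject/reset flags, unblocks the host and bails out rather than powering down with transactions still in flight.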
--- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,8 +1,8 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * -# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * +# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * +# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * # * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * # * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 20b249a649dd..e0d0da5f43d6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -840,8 +840,7 @@ struct lpfc_hba { #define LPFC_ENABLE_FCP 1 #define LPFC_ENABLE_NVME 2 #define LPFC_ENABLE_BOTH 3 - uint32_t nvme_embed_pbde; - uint32_t fcp_embed_pbde; + uint32_t cfg_enable_pbde; uint32_t io_channel_irqs; /* number of irqs for io channels */ struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index fdabba7b0282..7a9b6f7983c5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -64,6 +64,9 @@ #define LPFC_MIN_MRQ_POST 512 #define LPFC_MAX_MRQ_POST 2048 +#define LPFC_MAX_NVME_INFO_TMP_LEN 100 +#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" + /* * Write key size should be multiple of 4. If write key is changed * make sure that library write key is also changed. 
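The lpfc_attr.c hunks that follow rework lpfc_nvme_info_show() from open-coded "len += snprintf(buf + len, PAGE_SIZE - len, ...)" accumulation to formatting each fragment into a small scratch buffer with scnprintf() and appending with strlcat(), jumping to buffer_done as soon as an append would exceed PAGE_SIZE. A self-contained userspace model of that pattern, under stated assumptions: my_strlcat is a local stand-in because glibc has no strlcat, and PAGE_SZ/TMP_LEN are deliberately tiny to force the overflow path.

/* Illustrative only: the scnprintf()+strlcat() accumulation pattern
 * the lpfc_nvme_info_show() hunks below switch to. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 64	/* analog of PAGE_SIZE, shrunk for the demo */
#define TMP_LEN 32	/* analog of LPFC_MAX_NVME_INFO_TMP_LEN */

/* Returns the length it tried to create, like BSD strlcat: a result
 * >= size means the destination was truncated. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)
		return size + slen;
	snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

int main(void)
{
	char buf[PAGE_SZ] = "";
	char tmp[TMP_LEN];
	int i;

	for (i = 0; i < 8; i++) {
		snprintf(tmp, sizeof(tmp), "counter %d: %08x\n", i, i * 16);
		if (my_strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
			goto buffer_done;	/* would overflow: stop appending */
	}
buffer_done:
	printf("%s(len %zu of %d)\n", buf, strlen(buf), PAGE_SZ);
	return 0;
}

The point of the conversion is that scnprintf() returns the number of bytes actually written (unlike snprintf(), which returns the number that would have been written), so the old "len +=" arithmetic could walk past the end of the sysfs page; bounding every fragment in tmp and letting strlcat() report truncation makes the overflow check explicit at each step.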
@@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, char *statep; int i; int len = 0; + char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { - len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n"); + len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); return len; } if (phba->nvmet_support) { if (!phba->targetport) { - len = snprintf(buf, PAGE_SIZE, + len = scnprintf(buf, PAGE_SIZE, "NVME Target: x%llx is not allocated\n", wwn_to_u64(vport->fc_portname.u.wwn)); return len; @@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, statep = "REGISTERED"; else statep = "INIT"; - len += snprintf(buf + len, PAGE_SIZE - len, - "NVME Target Enabled State %s\n", - statep); - len += snprintf(buf + len, PAGE_SIZE - len, - "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", - "NVME Target: lpfc", - phba->brd_no, - wwn_to_u64(vport->fc_portname.u.wwn), - wwn_to_u64(vport->fc_nodename.u.wwn), - phba->targetport->port_id); - - len += snprintf(buf + len, PAGE_SIZE - len, - "\nNVME Target: Statistics\n"); + scnprintf(tmp, sizeof(tmp), + "NVME Target Enabled State %s\n", + statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", + "NVME Target: lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + phba->targetport->port_id); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) + >= PAGE_SIZE) + goto buffer_done; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - len += snprintf(buf+len, PAGE_SIZE-len, - "LS: Rcv %08x Drop %08x Abort %08x\n", - atomic_read(&tgtp->rcv_ls_req_in), - atomic_read(&tgtp->rcv_ls_req_drop), - atomic_read(&tgtp->xmt_ls_abort)); + scnprintf(tmp, sizeof(tmp), + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + if (atomic_read(&tgtp->rcv_ls_req_in) != atomic_read(&tgtp->rcv_ls_req_out)) { - len += snprintf(buf+len, PAGE_SIZE-len, - "Rcv LS: in %08x != out %08x\n", - atomic_read(&tgtp->rcv_ls_req_in), - atomic_read(&tgtp->rcv_ls_req_out)); + scnprintf(tmp, sizeof(tmp), + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; } - len += snprintf(buf+len, PAGE_SIZE-len, - "LS: Xmt %08x Drop %08x Cmpl %08x\n", - atomic_read(&tgtp->xmt_ls_rsp), - atomic_read(&tgtp->xmt_ls_drop), - atomic_read(&tgtp->xmt_ls_rsp_cmpl)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "LS: RSP Abort %08x xb %08x Err %08x\n", - atomic_read(&tgtp->xmt_ls_rsp_aborted), - atomic_read(&tgtp->xmt_ls_rsp_xb_set), - atomic_read(&tgtp->xmt_ls_rsp_error)); - - len += snprintf(buf+len, PAGE_SIZE-len, - "FCP: Rcv %08x Defer %08x Release %08x " - "Drop %08x\n", - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_defer), - atomic_read(&tgtp->xmt_fcp_release), - atomic_read(&tgtp->rcv_fcp_cmd_drop)); + scnprintf(tmp, sizeof(tmp), + "LS: Xmt %08x Drop %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, 
sizeof(tmp), + "LS: RSP Abort %08x xb %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp_aborted), + atomic_read(&tgtp->xmt_ls_rsp_xb_set), + atomic_read(&tgtp->xmt_ls_rsp_error)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP: Rcv %08x Defer %08x Release %08x " + "Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_defer), + atomic_read(&tgtp->xmt_fcp_release), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; if (atomic_read(&tgtp->rcv_fcp_cmd_in) != atomic_read(&tgtp->rcv_fcp_cmd_out)) { - len += snprintf(buf+len, PAGE_SIZE-len, - "Rcv FCP: in %08x != out %08x\n", - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out)); + scnprintf(tmp, sizeof(tmp), + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; } - len += snprintf(buf+len, PAGE_SIZE-len, - "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " - "drop %08x\n", - atomic_read(&tgtp->xmt_fcp_read), - atomic_read(&tgtp->xmt_fcp_read_rsp), - atomic_read(&tgtp->xmt_fcp_write), - atomic_read(&tgtp->xmt_fcp_rsp), - atomic_read(&tgtp->xmt_fcp_drop)); - - len += snprintf(buf+len, PAGE_SIZE-len, - "FCP Rsp Cmpl: %08x err %08x drop %08x\n", - atomic_read(&tgtp->xmt_fcp_rsp_cmpl), - atomic_read(&tgtp->xmt_fcp_rsp_error), - atomic_read(&tgtp->xmt_fcp_rsp_drop)); - - len += snprintf(buf+len, PAGE_SIZE-len, - "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", - atomic_read(&tgtp->xmt_fcp_rsp_aborted), - atomic_read(&tgtp->xmt_fcp_rsp_xb_set), - atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "ABORT: Xmt %08x Cmpl %08x\n", - atomic_read(&tgtp->xmt_fcp_abort), - atomic_read(&tgtp->xmt_fcp_abort_cmpl)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", - atomic_read(&tgtp->xmt_abort_sol), - atomic_read(&tgtp->xmt_abort_unsol), - atomic_read(&tgtp->xmt_abort_rsp), - atomic_read(&tgtp->xmt_abort_rsp_error)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "DELAY: ctx %08x fod %08x wqfull %08x\n", - atomic_read(&tgtp->defer_ctx), - atomic_read(&tgtp->defer_fod), - atomic_read(&tgtp->defer_wqfull)); + scnprintf(tmp, sizeof(tmp), + "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " + "drop %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp), + atomic_read(&tgtp->xmt_fcp_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_aborted), + atomic_read(&tgtp->xmt_fcp_rsp_xb_set), + atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "ABORT: Xmt %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_abort_cmpl)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", + 
atomic_read(&tgtp->xmt_abort_sol), + atomic_read(&tgtp->xmt_abort_unsol), + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "DELAY: ctx %08x fod %08x wqfull %08x\n", + atomic_read(&tgtp->defer_ctx), + atomic_read(&tgtp->defer_fod), + atomic_read(&tgtp->defer_wqfull)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; /* Calculate outstanding IOs */ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); tot += atomic_read(&tgtp->xmt_fcp_release); tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; - len += snprintf(buf + len, PAGE_SIZE - len, - "IO_CTX: %08x WAIT: cur %08x tot %08x\n" - "CTX Outstanding %08llx\n", - phba->sli4_hba.nvmet_xri_cnt, - phba->sli4_hba.nvmet_io_wait_cnt, - phba->sli4_hba.nvmet_io_wait_total, - tot); - - len += snprintf(buf+len, PAGE_SIZE-len, "\n"); - return len; + scnprintf(tmp, sizeof(tmp), + "IO_CTX: %08x WAIT: cur %08x tot %08x\n" + "CTX Outstanding %08llx\n\n", + phba->sli4_hba.nvmet_xri_cnt, + phba->sli4_hba.nvmet_io_wait_cnt, + phba->sli4_hba.nvmet_io_wait_total, + tot); + strlcat(buf, tmp, PAGE_SIZE); + goto buffer_done; } localport = vport->localport; if (!localport) { - len = snprintf(buf, PAGE_SIZE, + len = scnprintf(buf, PAGE_SIZE, "NVME Initiator x%llx is not allocated\n", wwn_to_u64(vport->fc_portname.u.wwn)); return len; } lport = (struct lpfc_nvme_lport *)localport->private; - len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); - - spin_lock_irq(shost->host_lock); - len += snprintf(buf + len, PAGE_SIZE - len, - "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", - phba->brd_no, - phba->sli4_hba.max_cfg_param.max_xri, - phba->sli4_hba.nvme_xri_max, - phba->sli4_hba.scsi_xri_max, - lpfc_sli4_get_els_iocb_cnt(phba)); + if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + rcu_read_lock(); + scnprintf(tmp, sizeof(tmp), + "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", + phba->brd_no, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.nvme_xri_max, + phba->sli4_hba.scsi_xri_max, + lpfc_sli4_get_els_iocb_cnt(phba)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, else statep = "UNKNOWN "; - len += snprintf(buf + len, PAGE_SIZE - len, - "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", - "NVME LPORT lpfc", - phba->brd_no, - wwn_to_u64(vport->fc_portname.u.wwn), - wwn_to_u64(vport->fc_nodename.u.wwn), - localport->port_id, statep); + scnprintf(tmp, sizeof(tmp), + "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", + "NVME LPORT lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + localport->port_id, statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { rport = lpfc_ndlp_get_nrport(ndlp); @@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, } /* Tab in to show lport ownership. 
*/ - len += snprintf(buf + len, PAGE_SIZE - len, - "NVME RPORT "); - if (phba->brd_no >= 10) - len += snprintf(buf + len, PAGE_SIZE - len, " "); - - len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ", - nrport->port_name); - len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ", - nrport->node_name); - len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ", - nrport->port_id); + if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + if (phba->brd_no >= 10) { + if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", + nrport->port_name); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", + nrport->node_name); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "DID x%06x ", + nrport->port_id); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; /* An NVME rport can have multiple roles. */ - if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) - len += snprintf(buf + len, PAGE_SIZE - len, - "INITIATOR "); - if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) - len += snprintf(buf + len, PAGE_SIZE - len, - "TARGET "); - if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) - len += snprintf(buf + len, PAGE_SIZE - len, - "DISCSRVC "); + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { + if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { + if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { + if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | - FC_PORT_ROLE_NVME_DISCOVERY)) - len += snprintf(buf + len, PAGE_SIZE - len, - "UNKNOWN ROLE x%x", - nrport->port_role); - - len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep); - /* Terminate the string. 
*/ - len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + FC_PORT_ROLE_NVME_DISCOVERY)) { + scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", + nrport->port_role); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + scnprintf(tmp, sizeof(tmp), "%s\n", statep); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; } - spin_unlock_irq(shost->host_lock); + rcu_read_unlock(); if (!lport) - return len; - - len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); - len += snprintf(buf+len, PAGE_SIZE-len, - "LS: Xmt %010x Cmpl %010x Abort %08x\n", - atomic_read(&lport->fc4NvmeLsRequests), - atomic_read(&lport->fc4NvmeLsCmpls), - atomic_read(&lport->xmt_ls_abort)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", - atomic_read(&lport->xmt_ls_err), - atomic_read(&lport->cmpl_ls_xb), - atomic_read(&lport->cmpl_ls_err)); + goto buffer_done; + + if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "LS: Xmt %010x Cmpl %010x Abort %08x\n", + atomic_read(&lport->fc4NvmeLsRequests), + atomic_read(&lport->fc4NvmeLsCmpls), + atomic_read(&lport->xmt_ls_abort)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", + atomic_read(&lport->xmt_ls_err), + atomic_read(&lport->cmpl_ls_xb), + atomic_read(&lport->cmpl_ls_err)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; totin = 0; totout = 0; @@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, data3 = atomic_read(&cstat->fc4NvmeControlRequests); totout += (data1 + data2 + data3); } - len += snprintf(buf+len, PAGE_SIZE-len, - "Total FCP Cmpl %016llx Issue %016llx " - "OutIO %016llx\n", - totin, totout, totout - totin); - - len += snprintf(buf+len, PAGE_SIZE-len, - " abort %08x noxri %08x nondlp %08x qdepth %08x " - "wqerr %08x err %08x\n", - atomic_read(&lport->xmt_fcp_abort), - atomic_read(&lport->xmt_fcp_noxri), - atomic_read(&lport->xmt_fcp_bad_ndlp), - atomic_read(&lport->xmt_fcp_qdepth), - atomic_read(&lport->xmt_fcp_err), - atomic_read(&lport->xmt_fcp_wqerr)); - - len += snprintf(buf + len, PAGE_SIZE - len, - "FCP CMPL: xb %08x Err %08x\n", - atomic_read(&lport->cmpl_fcp_xb), - atomic_read(&lport->cmpl_fcp_err)); + scnprintf(tmp, sizeof(tmp), + "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", + totin, totout, totout - totin); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "\tabort %08x noxri %08x nondlp %08x qdepth %08x " + "wqerr %08x err %08x\n", + atomic_read(&lport->xmt_fcp_abort), + atomic_read(&lport->xmt_fcp_noxri), + atomic_read(&lport->xmt_fcp_bad_ndlp), + atomic_read(&lport->xmt_fcp_qdepth), + atomic_read(&lport->xmt_fcp_err), + atomic_read(&lport->xmt_fcp_wqerr)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "FCP CMPL: xb %08x Err %08x\n", + atomic_read(&lport->cmpl_fcp_xb), + atomic_read(&lport->cmpl_fcp_err)); + strlcat(buf, tmp, PAGE_SIZE); + +buffer_done: + len = strnlen(buf, PAGE_SIZE); + + if (unlikely(len >= (PAGE_SIZE - 1))) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6314 Catching potential buffer " + "overflow > PAGE_SIZE = %lu bytes\n", + PAGE_SIZE); + strlcpy(buf + PAGE_SIZE - 1 - + strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1), + LPFC_NVME_INFO_MORE_STR, + strnlen(LPFC_NVME_INFO_MORE_STR, 
PAGE_SIZE - 1) + + 1); + } + return len; } @@ -5854,6 +5936,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } + } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { + switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_10GBPS: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } } else fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; @@ -6472,6 +6572,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_auto_imax = 0; phba->initial_imax = phba->cfg_fcp_imax; + phba->cfg_enable_pbde = 0; + /* A value of 0 means use the number of CPUs found in the system */ if (phba->cfg_fcp_io_channel == 0) phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index 931db52692f5..9659a8fff971 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index edb1a18a6414..90745feca808 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index e7d95a4e8042..32347c87e3b4 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index 6b32b0ae7506..43cf46a3a71f 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 437476df1130..fe987b26fcd9 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index cbd1372a134a..41b32f48c705 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index f32eaeb2225a..30efc7bf91bd 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 5a7547f9d8d8..28e2b60fc5c0 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * @@ -150,6 +150,9 @@ struct lpfc_node_rrq { unsigned long rrq_stop_time; }; +#define lpfc_ndlp_check_qdepth(phba, ndlp) \ + (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) + /* Defines for nlp_flag (uint32) */ #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3d34c8bf285a..0e7318cd7d0a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) " mbx status x%x\n", shdr_status, shdr_add_status, mb->mbxStatus); - if (mb->mbxStatus && !(shdr_status && - shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) { + if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || + (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || + (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { mempool_free(pmb, phba->mbox_mem_pool); goto error; } @@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lcb_res = (struct fc_lcb_res_frame *) (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); icmd = &elsiocb->iocb; icmd->ulpContext = lcb_context->rx_id; icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; @@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) *((uint32_t *)(pcmd)) = ELS_CMD_ACC; lcb_res->lcb_sub_command = lcb_context->sub_command; lcb_res->lcb_type = lcb_context->type; + lcb_res->capability = lcb_context->capability; lcb_res->lcb_frequency = lcb_context->frequency; + lcb_res->lcb_duration = lcb_context->duration; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); @@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, uint32_t beacon_state) { struct lpfc_hba *phba = vport->phba; + union lpfc_sli4_cfg_shdr *cfg_shdr; LPFC_MBOXQ_t *mbox = NULL; uint32_t len; int rc; @@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, if (!mbox) return 1; + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; len = sizeof(struct lpfc_mbx_set_beacon_config) - sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, @@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, beacon_state); - bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1); - bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0); + mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ + + /* + * Check bv1s bit before issuing the mailbox + * if bv1s == 1, LCB V1 supported + * else, LCB V0 supported + */ + + if (phba->sli4_hba.pc_sli4_params.bv1s) { + /* COMMON_SET_BEACON_CONFIG_V1 */ + cfg_shdr->request.word9 = BEACON_VERSION_V1; + lcb_context->capability |= LCB_CAPABILITY_DURATION; + bf_set(lpfc_mbx_set_beacon_port_type, + 
&mbox->u.mqe.un.beacon_config, 0); + bf_set(lpfc_mbx_set_beacon_duration_v1, + &mbox->u.mqe.un.beacon_config, + be16_to_cpu(lcb_context->duration)); + } else { + /* COMMON_SET_BEACON_CONFIG_V0 */ + if (be16_to_cpu(lcb_context->duration) != 0) { + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + cfg_shdr->request.word9 = BEACON_VERSION_V0; + lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); + bf_set(lpfc_mbx_set_beacon_state, + &mbox->u.mqe.un.beacon_config, beacon_state); + bf_set(lpfc_mbx_set_beacon_port_type, + &mbox->u.mqe.un.beacon_config, 1); + bf_set(lpfc_mbx_set_beacon_duration, + &mbox->u.mqe.un.beacon_config, + be16_to_cpu(lcb_context->duration)); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); @@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, beacon->lcb_frequency, be16_to_cpu(beacon->lcb_duration)); - if (phba->sli_rev < LPFC_SLI_REV4 || - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2)) { - rjt_err = LSRJT_CMD_UNSUPPORTED; - goto rjt; - } - - if (phba->hba_flag & HBA_FCOE_MODE) { - rjt_err = LSRJT_CMD_UNSUPPORTED; - goto rjt; - } if (beacon->lcb_sub_command != LPFC_LCB_ON && beacon->lcb_sub_command != LPFC_LCB_OFF) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } - if (beacon->lcb_sub_command == LPFC_LCB_ON && - be16_to_cpu(beacon->lcb_duration) != 0) { + + if (phba->sli_rev < LPFC_SLI_REV4 || + phba->hba_flag & HBA_FCOE_MODE || + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < + LPFC_SLI_INTF_IF_TYPE_2)) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } @@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; lcb_context->sub_command = beacon->lcb_sub_command; + lcb_context->capability = 0; lcb_context->type = beacon->lcb_type; lcb_context->frequency = beacon->lcb_frequency; + lcb_context->duration = beacon->lcb_duration; lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; lcb_context->rx_id = cmdiocb->iocb.ulpContext; lcb_context->ndlp = lpfc_nlp_get(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 66a3aa0f1255..f430d3dddfa1 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 08a3f1520159..009aa0eee040 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * @@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */ struct fc_lcb_request_frame { uint32_t lcb_command; /* ELS command opcode (0x81) */ uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ -#define LPFC_LCB_ON 0x1 -#define LPFC_LCB_OFF 0x2 - uint8_t reserved[3]; - +#define LPFC_LCB_ON 0x1 +#define LPFC_LCB_OFF 0x2 + uint8_t reserved[2]; + uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ -#define LPFC_LCB_GREEN 0x1 -#define LPFC_LCB_AMBER 0x2 +#define LPFC_LCB_GREEN 0x1 +#define LPFC_LCB_AMBER 0x2 uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ +#define LCB_CAPABILITY_DURATION 1 +#define BEACON_VERSION_V1 1 +#define BEACON_VERSION_V0 0 uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ }; @@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame { struct fc_lcb_res_frame { uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */ uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ - uint8_t reserved[3]; + uint8_t reserved[2]; + uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f43f0bacb77a..083f8c8706e5 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config { #define lpfc_mbx_set_beacon_duration_SHIFT 16 #define lpfc_mbx_set_beacon_duration_MASK 0x000000FF #define lpfc_mbx_set_beacon_duration_WORD word4 -#define lpfc_mbx_set_beacon_status_duration_SHIFT 24 -#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF -#define lpfc_mbx_set_beacon_status_duration_WORD word4 + +/* COMMON_SET_BEACON_CONFIG_V1 */ +#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16 +#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF +#define lpfc_mbx_set_beacon_duration_v1_WORD word4 + uint32_t word5; /* RESERVED */ }; struct lpfc_id_range { @@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl { */ #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB +#define ADD_STATUS_INVALID_REQUEST 0x4B struct lpfc_mbx_sli4_config { struct mbox_header header; @@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters { #define cfg_nosr_SHIFT 9 #define cfg_nosr_MASK 0x00000001 #define cfg_nosr_WORD word19 -#define LPFC_NODELAY_MAX_IO 32 + +#define cfg_bv1s_SHIFT 10 +#define cfg_bv1s_MASK 0x00000001 +#define cfg_bv1s_WORD word19 + + uint32_t word20; +#define cfg_max_tow_xri_SHIFT 0 +#define cfg_max_tow_xri_MASK 0x0000ffff +#define cfg_max_tow_xri_WORD word20 + + uint32_t word21; /* RESERVED */ + uint32_t word22; /* RESERVED */ + uint32_t word23; /* RESERVED */ + + uint32_t word24; +#define cfg_frag_field_offset_SHIFT 0 +#define cfg_frag_field_offset_MASK 0x0000ffff +#define cfg_frag_field_offset_WORD word24 + +#define cfg_frag_field_size_SHIFT 16 +#define cfg_frag_field_size_MASK 0x0000ffff +#define cfg_frag_field_size_WORD word24 + + uint32_t word25; +#define cfg_sgl_field_offset_SHIFT 0 +#define cfg_sgl_field_offset_MASK 0x0000ffff +#define cfg_sgl_field_offset_WORD word25 + +#define cfg_sgl_field_size_SHIFT 16 +#define cfg_sgl_field_size_MASK 0x0000ffff +#define cfg_sgl_field_size_WORD word25 + + uint32_t word26; /* Chain SGE initial value LOW */ + uint32_t word27; /* Chain SGE initial value HIGH */ +#define 
LPFC_NODELAY_MAX_IO 32 }; #define LPFC_SET_UE_RECOVERY 0x10 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index 07ee34017d88..d48414e295a0 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 1f2d8a29b075..3b1d83feb98f 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -10393,6 +10393,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { + if (!nvmet_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6424 NVMET XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); if (!nvme_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6100 NVME XRI exchange busy " @@ -10645,6 +10650,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); + sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); @@ -10674,18 +10680,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; } - /* Only embed PBDE for if_type 6 */ - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_IF_TYPE_6) { - phba->fcp_embed_pbde = 1; - phba->nvme_embed_pbde = 1; - } - - /* PBDE support requires xib be set */ - if (!bf_get(cfg_xib, mbx_sli4_parameters)) { - phba->fcp_embed_pbde = 0; - phba->nvme_embed_pbde = 0; - } + /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) + phba->cfg_enable_pbde = 0; /* * To support Suppress Response feature we must satisfy 3 conditions. @@ -10719,10 +10717,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcp_embed_io = 0; lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, - "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n", + "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", bf_get(cfg_xib, mbx_sli4_parameters), - phba->fcp_embed_pbde, phba->fcp_embed_io, - phba->nvme_support, phba->nvme_embed_pbde, + phba->cfg_enable_pbde, + phba->fcp_embed_io, phba->nvme_support, phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 3b654ad08d1f..ea10f03437f5 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
* - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 47c02da11f01..deb094fdbb79 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index f69054afb986..dc272a936d7c 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index b93e78f671fb..95d60ab5ebf9 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2010 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 31089721990b..6c30ff9d7ea1 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + /* Retrieve RPI from LOGO IOCB. 
RPI is used for CMD_ABORT_XRI_CN */ + if (vport->phba->sli_rev == LPFC_SLI_REV3) + ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); @@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (bf_get_be32(prli_disc, nvpr)) ndlp->nlp_type |= NLP_NVME_DISCOVERY; - /* This node is an NVME target. Adjust the command - * queue depth on this node to not exceed the available - * xris. - */ - ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max; - /* * If prli_fba is set, the Target supports FirstBurst. * If prli_fb_sz is 0, the FirstBurst size is unlimited, diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 76a5a99605aa..028462e5994d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1135,9 +1135,6 @@ out_err: else lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) - atomic_dec(&ndlp->cmd_pending); - /* Update stats and complete the IO. There is * no need for dma unprep because the nvme_transport * owns the dma address. @@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 9 */ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + /* Words 13 14 15 are for PBDE support */ + pwqeq->vport = vport; return 0; } @@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, data_sg = sg_next(data_sg); sgl++; } - if (phba->nvme_embed_pbde) { + if (phba->cfg_enable_pbde) { /* Use PBDE support for first SGL only, offset == 0 */ /* Words 13-15 */ bde = (struct ulp_bde64 *) @@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); } - } else { - bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); - memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); + } else { /* For this clause to be valid, the payload_length * and sg_cnt must zero. */ @@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, /* The node is shared with FCP IO, make sure the IO pending count does * not exceed the programmed depth. 
*/ - if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && - !expedite) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6174 Fail IO, ndlp qdepth exceeded: " - "idx %d DID %x pend %d qdepth %d\n", - lpfc_queue_info->index, ndlp->nlp_DID, - atomic_read(&ndlp->cmd_pending), - ndlp->cmd_qdepth); - atomic_inc(&lport->xmt_fcp_qdepth); - ret = -EBUSY; - goto out_fail; + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && + !expedite) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6174 Fail IO, ndlp qdepth exceeded: " + "idx %d DID %x pend %d qdepth %d\n", + lpfc_queue_info->index, ndlp->nlp_DID, + atomic_read(&ndlp->cmd_pending), + ndlp->cmd_qdepth); + atomic_inc(&lport->xmt_fcp_qdepth); + ret = -EBUSY; + goto out_fail; + } } lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); @@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, goto out_free_nvme_buf; } - atomic_inc(&ndlp->cmd_pending); - lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_queue_info->index, ndlp->nlp_DID); @@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); if (ret) { atomic_inc(&lport->xmt_fcp_wqerr); - atomic_dec(&ndlp->cmd_pending); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6113 Fail IO, Could not issue WQE err %x " "sid: x%x did: x%x oxid: x%x\n", @@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, lpfc_ncmd = lpfc_nvme_buf(phba); } spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); + + if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) { + atomic_inc(&ndlp->cmd_pending); + lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH; + } return lpfc_ncmd; } @@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) { unsigned long iflag = 0; + if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp) + atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); + lpfc_ncmd->nonsg_phys = 0; + lpfc_ncmd->ndlp = NULL; + lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; + if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " @@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct lpfc_nvme_rport *oldrport; struct nvme_fc_remote_port *remote_port; struct nvme_fc_port_info rpinfo; - struct lpfc_nodelist *prev_ndlp; + struct lpfc_nodelist *prev_ndlp = NULL; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, "6006 Register NVME PORT. DID x%06x nlptype x%x\n", @@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_unlock_irq(&vport->phba->hbalock); rport = remote_port->private; if (oldrport) { + /* New remoteport record does not guarantee valid + * host private memory area. + */ + prev_ndlp = oldrport->ndlp; if (oldrport == remote_port->private) { - /* Same remoteport. Just reuse. */ + /* Same remoteport - ndlp should match. + * Just reuse. 
+ */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, "6014 Rebinding lport to " "remoteport %p wwpn 0x%llx, " - "Data: x%x x%x %p x%x x%06x\n", + "Data: x%x x%x %p %p x%x x%06x\n", remote_port, remote_port->port_name, remote_port->port_id, remote_port->port_role, + prev_ndlp, ndlp, ndlp->nlp_type, ndlp->nlp_DID); return 0; } - prev_ndlp = rport->ndlp; /* Sever the ndlp<->rport association * before dropping the ndlp ref from @@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NODE, "6022 Binding new rport to " - "lport %p Remoteport %p WWNN 0x%llx, " + "lport %p Remoteport %p rport %p WWNN 0x%llx, " "Rport WWPN 0x%llx DID " - "x%06x Role x%x, ndlp %p\n", - lport, remote_port, + "x%06x Role x%x, ndlp %p prev_ndlp %p\n", + lport, remote_port, rport, rpinfo.node_name, rpinfo.port_name, rpinfo.port_id, rpinfo.port_role, - ndlp); + ndlp, prev_ndlp); } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC | LOG_NODE, @@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; u32 i, wait_cnt = 0; - if (phba->sli_rev < LPFC_SLI_REV4) + if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq) return; /* Cycle through all NVME rings and make sure all outstanding @@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) for (i = 0; i < phba->cfg_nvme_io_channel; i++) { pring = phba->sli4_hba.nvme_wq[i]->pring; + if (!pring) + continue; + /* Retrieve everything on the txcmplq */ while (!list_empty(&pring->txcmplq)) { msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index a3dee9104981..5ff51ae55e4a 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -84,6 +84,7 @@ struct lpfc_nvme_buf { uint16_t flags; /* TBD convert exch_busy to flags */ #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ +#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint16_t cpu; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 7271c9d885dd..22f8a204b69f 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channsel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * @@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) /* Process FCP command */ if (rc == 0) { + ctxp->rqb_buffer = NULL; atomic_inc(&tgtp->rcv_fcp_cmd_out); nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); return; @@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", ctxp->oxid, ctxp->size, smp_processor_id()); + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6425 Defer rcv: no buffer xri x%x: " + "flg %x ste %x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + return; + } + tgtp = phba->targetport->private; - atomic_inc(&tgtp->rcv_fcp_cmd_defer); + if (tgtp) + atomic_inc(&tgtp->rcv_fcp_cmd_defer); /* Free the nvmebuf since a new buffer already replaced it */ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); @@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t *payload; uint32_t size, oxid, sid, rc; + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6154 LS Drop IO\n"); + "6154 LS Drop IO x%x\n", oxid); oxid = 0; size = 0; sid = 0; @@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; payload = (uint32_t *)(nvmebuf->dbuf.virt); - fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); - oxid = be16_to_cpu(fc_hdr->fh_ox_id); sid = sli4_sid_from_fc_hdr(fc_hdr); ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); @@ -2492,7 +2503,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); /* Word 11 - set pbde later */ - if (phba->nvme_embed_pbde) { + if (phba->cfg_enable_pbde) { do_pbde = 1; } else { bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); @@ -2607,16 +2618,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(cnt); - if (do_pbde && i == 0) { + if (i == 0) { bde = (struct ulp_bde64 *)&wqe->words[13]; - memset(bde, 0, sizeof(struct ulp_bde64)); - /* Words 13-15 (PBDE)*/ - bde->addrLow = sgl->addr_lo; - bde->addrHigh = sgl->addr_hi; - bde->tus.f.bdeSize = - le32_to_cpu(sgl->sge_len); - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bde->tus.w = cpu_to_le32(bde->tus.w); + if (do_pbde) { + /* Words 13-15 (PBDE) */ + bde->addrLow = sgl->addr_lo; + bde->addrHigh = sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } else { + memset(bde, 0, sizeof(struct ulp_bde64)); + } } sgl++; ctxp->offset += cnt; @@ -3105,11 +3119,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, } aerr: - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVMET_CTX_RLS) + list_del(&ctxp->list); + ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); + spin_unlock_irqrestore(&ctxp->ctxlock, flags); + atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x\n", ctxp->oxid, rc); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); return 1; } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 81f520abfd64..1aaff63f1f41 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 00045daefb17..870f27e537b5 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) spin_unlock(&phba->scsi_buf_list_put_lock); } spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); + + if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { + atomic_inc(&ndlp->cmd_pending); + lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } return lpfc_cmd; } /** @@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); if (!found) return NULL; + + if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { + atomic_inc(&ndlp->cmd_pending); + lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } return lpfc_cmd; } /** @@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) static void lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { + if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) + atomic_dec(&psb->ndlp->cmd_pending); + psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; phba->lpfc_release_scsi_buf(phba, psb); } @@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /* * Setup the first Payload BDE. For FCoE we just key off - * Performance Hints, for FC we utilize fcp_embed_pbde. + * Performance Hints, for FC we use lpfc_enable_pbde. + * We populate words 13-15 of IOCB/WQE. 
*/ if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || - phba->fcp_embed_pbde) { + phba->cfg_enable_pbde) { bde = (struct ulp_bde64 *) - &(iocb_cmd->unsli3.sli3Words[5]); + &(iocb_cmd->unsli3.sli3Words[5]); bde->addrLow = first_data_sgl->addr_lo; bde->addrHigh = first_data_sgl->addr_hi; bde->tus.f.bdeSize = @@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); + + if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || + phba->cfg_enable_pbde) { + bde = (struct ulp_bde64 *) + &(iocb_cmd->unsli3.sli3Words[5]); + memset(bde, 0, (sizeof(uint32_t) * 3)); + } } /* @@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { spin_lock_irqsave(shost->host_lock, flags); if (pnode && NLP_CHK_NODE_ACT(pnode)) { - atomic_dec(&pnode->cmd_pending); if (pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && (atomic_read(&pnode->cmd_pending) > @@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, pnode->last_change_time = jiffies; } spin_unlock_irqrestore(shost->host_lock, flags); - } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { - atomic_dec(&pnode->cmd_pending); } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); @@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) int err; rdata = lpfc_rport_data_from_scsi_device(cmnd->device); + + /* sanity check on references */ + if (unlikely(!rdata) || unlikely(!rport)) + goto out_fail_command; + err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; @@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) goto out_tgt_busy; - if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, - "3377 Target Queue Full, scsi Id:%d Qdepth:%d" - " Pending command:%d" - " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " - " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", - ndlp->nlp_sid, ndlp->cmd_qdepth, - atomic_read(&ndlp->cmd_pending), - ndlp->nlp_nodename.u.wwn[0], - ndlp->nlp_nodename.u.wwn[1], - ndlp->nlp_nodename.u.wwn[2], - ndlp->nlp_nodename.u.wwn[3], - ndlp->nlp_nodename.u.wwn[4], - ndlp->nlp_nodename.u.wwn[5], - ndlp->nlp_nodename.u.wwn[6], - ndlp->nlp_nodename.u.wwn[7], - ndlp->nlp_portname.u.wwn[0], - ndlp->nlp_portname.u.wwn[1], - ndlp->nlp_portname.u.wwn[2], - ndlp->nlp_portname.u.wwn[3], - ndlp->nlp_portname.u.wwn[4], - ndlp->nlp_portname.u.wwn[5], - ndlp->nlp_portname.u.wwn[6], - ndlp->nlp_portname.u.wwn[7]); - goto out_tgt_busy; + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, + "3377 Target Queue Full, scsi Id:%d " + "Qdepth:%d Pending command:%d" + " WWNN:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + " WWPN:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x", + ndlp->nlp_sid, ndlp->cmd_qdepth, + atomic_read(&ndlp->cmd_pending), + ndlp->nlp_nodename.u.wwn[0], + ndlp->nlp_nodename.u.wwn[1], + ndlp->nlp_nodename.u.wwn[2], + ndlp->nlp_nodename.u.wwn[3], + ndlp->nlp_nodename.u.wwn[4], + ndlp->nlp_nodename.u.wwn[5], + ndlp->nlp_nodename.u.wwn[6], + ndlp->nlp_nodename.u.wwn[7], + ndlp->nlp_portname.u.wwn[0], + ndlp->nlp_portname.u.wwn[1], + ndlp->nlp_portname.u.wwn[2], + ndlp->nlp_portname.u.wwn[3], + 
ndlp->nlp_portname.u.wwn[4], + ndlp->nlp_portname.u.wwn[5], + ndlp->nlp_portname.u.wwn[6], + ndlp->nlp_portname.u.wwn[7]); + goto out_tgt_busy; + } } - atomic_inc(&ndlp->cmd_pending); lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); if (lpfc_cmd == NULL) { @@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) */ lpfc_cmd->pCmd = cmnd; lpfc_cmd->rdata = rdata; + lpfc_cmd->ndlp = ndlp; lpfc_cmd->timeout = 0; lpfc_cmd->start_time = jiffies; cmnd->host_scribble = (unsigned char *)lpfc_cmd; @@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: - atomic_dec(&ndlp->cmd_pending); return SCSI_MLQUEUE_HOST_BUSY; out_tgt_busy: @@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; - struct lpfc_sli_ring *pring_s4; + struct lpfc_sli_ring *pring_s4 = NULL; int ret_val; unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); @@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) } iocb = &lpfc_cmd->cur_iocbq; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (!(phba->cfg_fof) || + (!(iocb->iocb_flag & LPFC_IO_FOF))) { + pring_s4 = + phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring; + } else { + iocb->hba_wqidx = 0; + pring_s4 = phba->sli4_hba.oas_wq->pring; + } + if (!pring_s4) { + ret = FAILED; + goto out_unlock; + } + spin_lock(&pring_s4->ring_lock); + } /* the command is in process of being cancelled */ if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3169 SCSI Layer abort requested I/O has been " @@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) * see the completion before the eh fired. Just return SUCCESS. 
*/ if (lpfc_cmd->pCmd != cmnd) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3170 SCSI Layer abort requested I/O has been " "completed by LLD.\n"); @@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3389 SCSI Layer I/O Abort Request is pending\n"); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); spin_unlock_irqrestore(&phba->hbalock, flags); goto wait_for_cmpl; } @@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb = __lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); goto out_unlock; } @@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; + lpfc_cmd->waitq = &waitq; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb); - if (pring_s4 == NULL) { - ret = FAILED; - goto out_unlock; - } /* Note: both hbalock and ring_lock must be set here */ - spin_lock(&pring_s4->ring_lock); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, abtsiocb, 0); spin_unlock(&pring_s4->ring_lock); @@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (ret_val == IOCB_ERROR) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock_irqsave(&pring_s4->ring_lock, flags); + else + spin_lock_irqsave(&phba->hbalock, flags); + /* Indicate the IO is not being aborted by the driver. */ + iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; + lpfc_cmd->waitq = NULL; + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock_irqrestore(&pring_s4->ring_lock, flags); + else + spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); wait_for_cmpl: - lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), @@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; lpfc_cmd->pCmd = cmnd; + lpfc_cmd->ndlp = pnode; status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, task_mgmt_cmd); diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c38e4da71f5f..cc99859774ff 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -134,11 +134,13 @@ struct lpfc_scsi_buf { struct list_head list; struct scsi_cmnd *pCmd; struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp; uint32_t timeout; uint16_t flags; /* TBD convert exch_busy to flags */ #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ +#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. 
*/ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d4510e64856a..234951ed61f5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -145,6 +145,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) uint32_t idx; uint32_t i = 0; uint8_t *tmp; + u32 if_type; /* sanity check on queue memory */ if (unlikely(!q)) @@ -199,8 +200,14 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) q->queue_id); } else { bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); - bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); + + /* Leave bits <23:16> clear for if_type 6 dpp */ + if_type = bf_get(lpfc_sli_intf_if_type, + &q->phba->sli4_hba.sli_intf); + if (if_type != LPFC_SLI_INTF_IF_TYPE_6) + bf_set(lpfc_wq_db_list_fm_index, &doorbell, + host_index); } } else if (q->db_format == LPFC_DB_RING_FORMAT) { bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); @@ -9116,8 +9123,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, } /* Note, word 10 is already initialized to 0 */ - /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ - if (phba->fcp_embed_pbde) + /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ + if (phba->cfg_enable_pbde) bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); else bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); @@ -9180,8 +9187,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, } /* Note, word 10 is already initialized to 0 */ - /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ - if (phba->fcp_embed_pbde) + /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ + if (phba->cfg_enable_pbde) bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); else bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); @@ -10702,6 +10709,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&phba->hbalock); if (phba->sli_rev < LPFC_SLI_REV4) { + if (irsp->ulpCommand == CMD_ABORT_XRI_CX && + irsp->ulpStatus == IOSTAT_LOCAL_REJECT && + irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { + spin_unlock_irq(&phba->hbalock); + goto release_iocb; + } if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) abort_iocb = @@ -10723,6 +10736,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(&phba->hbalock); } +release_iocb: lpfc_sli_release_iocbq(phba, cmdiocb); return; } @@ -10779,6 +10793,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t *iabt = NULL; int retval; unsigned long iflags; + struct lpfc_nodelist *ndlp; lockdep_assert_held(&phba->hbalock); @@ -10809,9 +10824,13 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (phba->sli_rev == LPFC_SLI_REV4) { iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; iabt->un.acxri.abortContextTag = cmdiocb->iotag; - } - else + } else { iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + if (pring->ringno == LPFC_ELS_RING) { + ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); + iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; + } + } iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; @@ -11090,10 +11109,11 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd; int rc = 1; - if (!(iocbq->iocb_flag & LPFC_IO_FCP)) + if (iocbq->vport != vport) return rc; - if (iocbq->vport != vport) + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) return rc; 
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); @@ -11103,13 +11123,13 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, switch (ctx_cmd) { case LPFC_CTX_LUN: - if ((lpfc_cmd->rdata->pnode) && + if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) rc = 0; break; case LPFC_CTX_TGT: - if ((lpfc_cmd->rdata->pnode) && + if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) rc = 0; break; @@ -11224,6 +11244,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, int errcnt = 0, ret_val = 0; int i; + /* all I/Os are in process of being flushed */ + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) + return errcnt; + for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 431754195505..f37f0f33ef2a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cf64aca82bd0..399c0015c546 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -490,6 +490,7 @@ struct lpfc_pc_sli4_params { uint8_t eqav; uint8_t cqav; uint8_t wqsize; + uint8_t bv1s; #define LPFC_WQ_SZ64_SUPPORT 1 #define LPFC_WQ_SZ128_SUPPORT 2 uint8_t wqpcnt; @@ -774,7 +775,9 @@ struct lpfc_rdp_context { struct lpfc_lcb_context { uint8_t sub_command; uint8_t type; + uint8_t capability; uint8_t frequency; + uint16_t duration; uint16_t ox_id; uint16_t rx_id; struct lpfc_nodelist *ndlp; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 18c23afcf46b..501249509af4 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.0.0.4" +#define LPFC_DRIVER_VERSION "12.0.0.6" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -33,5 +33,5 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION #define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \ - "Reserved. The term \"Broadcom\" refers to Broadcom Limited " \ + "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ "and/or its subsidiaries." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index c9d33b1268cb..48d19d9fbcbb 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 62295971f66c..f4b8528dd2e7 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index dc3a0542a2e8..e97bf2670315 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -483,6 +483,8 @@ struct pqi_raid_error_info { #define CISS_CMD_STATUS_TMF 0xd #define CISS_CMD_STATUS_AIO_DISABLED 0xe +#define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED + #define PQI_NUM_EVENT_QUEUE_ELEMENTS 32 #define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response) @@ -581,8 +583,8 @@ struct pqi_admin_queues_aligned { struct pqi_admin_queues { void *iq_element_array; void *oq_element_array; - volatile pqi_index_t *iq_ci; - volatile pqi_index_t *oq_pi; + pqi_index_t *iq_ci; + pqi_index_t __iomem *oq_pi; dma_addr_t iq_element_array_bus_addr; dma_addr_t oq_element_array_bus_addr; dma_addr_t iq_ci_bus_addr; @@ -606,8 +608,8 @@ struct pqi_queue_group { dma_addr_t oq_element_array_bus_addr; __le32 __iomem *iq_pi[2]; pqi_index_t iq_pi_copy[2]; - volatile pqi_index_t *iq_ci[2]; - volatile pqi_index_t *oq_pi; + pqi_index_t __iomem *iq_ci[2]; + pqi_index_t __iomem *oq_pi; dma_addr_t iq_ci_bus_addr[2]; dma_addr_t oq_pi_bus_addr; __le32 __iomem *oq_ci; @@ -620,7 +622,7 @@ struct pqi_event_queue { u16 oq_id; u16 int_msg_num; void *oq_element_array; - volatile pqi_index_t *oq_pi; + pqi_index_t __iomem *oq_pi; dma_addr_t oq_element_array_bus_addr; dma_addr_t oq_pi_bus_addr; __le32 __iomem *oq_ci; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 8fe918398336..9295f4842c7b 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -40,11 +40,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "1.1.2-126" +#define DRIVER_VERSION "1.1.4-130" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 2 -#define DRIVER_REVISION 126 +#define DRIVER_RELEASE 4 +#define DRIVER_REVISION 130 #define DRIVER_NAME "Microsemi PQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -1197,20 +1197,30 @@ no_buffer: device->volume_offline = volume_offline; } +#define PQI_INQUIRY_PAGE0_RETRIES 3 + static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { int rc; u8 *buffer; + unsigned int retries; buffer = kmalloc(64, GFP_KERNEL); if (!buffer) return -ENOMEM; /* Send an inquiry to the device to see what it is. 
*/ - rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); - if (rc) - goto out; + for (retries = 0;;) { + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, + buffer, 64); + if (rc == 0) + break; + if (pqi_is_logical_device(device) || + rc != PQI_CMD_STATUS_ABORTED || + ++retries > PQI_INQUIRY_PAGE0_RETRIES) + goto out; + } scsi_sanitize_inquiry_string(&buffer[8], 8); scsi_sanitize_inquiry_string(&buffer[16], 16); @@ -2692,7 +2702,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, oq_ci = queue_group->oq_ci_copy; while (1) { - oq_pi = *queue_group->oq_pi; + oq_pi = readl(queue_group->oq_pi); if (oq_pi == oq_ci) break; @@ -2783,7 +2793,7 @@ static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); iq_pi = queue_group->iq_pi_copy[RAID_PATH]; - iq_ci = *queue_group->iq_ci[RAID_PATH]; + iq_ci = readl(queue_group->iq_ci[RAID_PATH]); if (pqi_num_elements_free(iq_pi, iq_ci, ctrl_info->num_elements_per_iq)) @@ -2943,7 +2953,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) oq_ci = event_queue->oq_ci_copy; while (1) { - oq_pi = *event_queue->oq_pi; + oq_pi = readl(event_queue->oq_pi); if (oq_pi == oq_ci) break; @@ -3167,7 +3177,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) size_t element_array_length_per_iq; size_t element_array_length_per_oq; void *element_array; - void *next_queue_index; + void __iomem *next_queue_index; void *aligned_pointer; unsigned int num_inbound_queues; unsigned int num_outbound_queues; @@ -3263,7 +3273,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * PQI_EVENT_OQ_ELEMENT_LENGTH; - next_queue_index = PTR_ALIGN(element_array, + next_queue_index = (void __iomem *)PTR_ALIGN(element_array, PQI_OPERATIONAL_INDEX_ALIGNMENT); for (i = 0; i < ctrl_info->num_queue_groups; i++) { @@ -3271,21 +3281,24 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) queue_group->iq_ci[RAID_PATH] = next_queue_index; queue_group->iq_ci_bus_addr[RAID_PATH] = ctrl_info->queue_memory_base_dma_handle + - (next_queue_index - ctrl_info->queue_memory_base); + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); next_queue_index += sizeof(pqi_index_t); next_queue_index = PTR_ALIGN(next_queue_index, PQI_OPERATIONAL_INDEX_ALIGNMENT); queue_group->iq_ci[AIO_PATH] = next_queue_index; queue_group->iq_ci_bus_addr[AIO_PATH] = ctrl_info->queue_memory_base_dma_handle + - (next_queue_index - ctrl_info->queue_memory_base); + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); next_queue_index += sizeof(pqi_index_t); next_queue_index = PTR_ALIGN(next_queue_index, PQI_OPERATIONAL_INDEX_ALIGNMENT); queue_group->oq_pi = next_queue_index; queue_group->oq_pi_bus_addr = ctrl_info->queue_memory_base_dma_handle + - (next_queue_index - ctrl_info->queue_memory_base); + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); next_queue_index += sizeof(pqi_index_t); next_queue_index = PTR_ALIGN(next_queue_index, PQI_OPERATIONAL_INDEX_ALIGNMENT); @@ -3294,7 +3307,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) ctrl_info->event_queue.oq_pi = next_queue_index; ctrl_info->event_queue.oq_pi_bus_addr = ctrl_info->queue_memory_base_dma_handle + - (next_queue_index - ctrl_info->queue_memory_base); + (next_queue_index - + (void __iomem *)ctrl_info->queue_memory_base); return 0; } @@ -3368,7 
+3382,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) admin_queues->oq_element_array = &admin_queues_aligned->oq_element_array; admin_queues->iq_ci = &admin_queues_aligned->iq_ci; - admin_queues->oq_pi = &admin_queues_aligned->oq_pi; + admin_queues->oq_pi = + (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; admin_queues->iq_element_array_bus_addr = ctrl_info->admin_queue_memory_base_dma_handle + @@ -3384,8 +3399,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) ctrl_info->admin_queue_memory_base); admin_queues->oq_pi_bus_addr = ctrl_info->admin_queue_memory_base_dma_handle + - ((void *)admin_queues->oq_pi - - ctrl_info->admin_queue_memory_base); + ((void __iomem *)admin_queues->oq_pi - + (void __iomem *)ctrl_info->admin_queue_memory_base); return 0; } @@ -3486,7 +3501,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; while (1) { - oq_pi = *admin_queues->oq_pi; + oq_pi = readl(admin_queues->oq_pi); if (oq_pi != oq_ci) break; if (time_after(jiffies, timeout)) { @@ -3545,7 +3560,7 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, DIV_ROUND_UP(iu_length, PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); - iq_ci = *queue_group->iq_ci[path]; + iq_ci = readl(queue_group->iq_ci[path]); if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, ctrl_info->num_elements_per_iq)) @@ -3621,29 +3636,24 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, complete(waiting); } -static int pqi_submit_raid_request_synchronous_with_io_request( - struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, - unsigned long timeout_msecs) +static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info + *error_info) { - int rc = 0; - DECLARE_COMPLETION_ONSTACK(wait); - - io_request->io_complete_callback = pqi_raid_synchronous_complete; - io_request->context = &wait; + int rc = -EIO; - pqi_start_io(ctrl_info, - &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, - io_request); - - if (timeout_msecs == NO_TIMEOUT) { - pqi_wait_for_completion_io(ctrl_info, &wait); - } else { - if (!wait_for_completion_io_timeout(&wait, - msecs_to_jiffies(timeout_msecs))) { - dev_warn(&ctrl_info->pci_dev->dev, - "command timed out\n"); - rc = -ETIMEDOUT; - } + switch (error_info->data_out_result) { + case PQI_DATA_IN_OUT_GOOD: + if (error_info->status == SAM_STAT_GOOD) + rc = 0; + break; + case PQI_DATA_IN_OUT_UNDERFLOW: + if (error_info->status == SAM_STAT_GOOD || + error_info->status == SAM_STAT_CHECK_CONDITION) + rc = 0; + break; + case PQI_DATA_IN_OUT_ABORTED: + rc = PQI_CMD_STATUS_ABORTED; + break; } return rc; @@ -3653,11 +3663,12 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, struct pqi_iu_header *request, unsigned int flags, struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) { - int rc; + int rc = 0; struct pqi_io_request *io_request; unsigned long start_jiffies; unsigned long msecs_blocked; size_t iu_length; + DECLARE_COMPLETION_ONSTACK(wait); /* * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value @@ -3686,11 +3697,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, pqi_ctrl_busy(ctrl_info); timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); if (timeout_msecs == 0) { + pqi_ctrl_unbusy(ctrl_info); rc = -ETIMEDOUT; goto out; } if (pqi_ctrl_offline(ctrl_info)) { + pqi_ctrl_unbusy(ctrl_info); rc = -ENXIO; goto out; 
} @@ -3708,8 +3721,25 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, PQI_REQUEST_HEADER_LENGTH; memcpy(io_request->iu, request, iu_length); - rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info, - io_request, timeout_msecs); + io_request->io_complete_callback = pqi_raid_synchronous_complete; + io_request->context = &wait; + + pqi_start_io(ctrl_info, + &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, + io_request); + + pqi_ctrl_unbusy(ctrl_info); + + if (timeout_msecs == NO_TIMEOUT) { + pqi_wait_for_completion_io(ctrl_info, &wait); + } else { + if (!wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(timeout_msecs))) { + dev_warn(&ctrl_info->pci_dev->dev, + "command timed out\n"); + rc = -ETIMEDOUT; + } + } if (error_info) { if (io_request->error_info) @@ -3718,25 +3748,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, else memset(error_info, 0, sizeof(*error_info)); } else if (rc == 0 && io_request->error_info) { - u8 scsi_status; - struct pqi_raid_error_info *raid_error_info; - - raid_error_info = io_request->error_info; - scsi_status = raid_error_info->status; - - if (scsi_status == SAM_STAT_CHECK_CONDITION && - raid_error_info->data_out_result == - PQI_DATA_IN_OUT_UNDERFLOW) - scsi_status = SAM_STAT_GOOD; - - if (scsi_status != SAM_STAT_GOOD) - rc = -EIO; + rc = pqi_process_raid_io_error_synchronous( + io_request->error_info); } pqi_free_io_request(io_request); out: - pqi_ctrl_unbusy(ctrl_info); up(&ctrl_info->sync_request_sem); return rc; @@ -3899,29 +3917,6 @@ static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) return 0; } -static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info, - bool inbound_queue, u16 queue_id) -{ - struct pqi_general_admin_request request; - struct pqi_general_admin_response response; - - memset(&request, 0, sizeof(request)); - request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; - put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, - &request.header.iu_length); - if (inbound_queue) - request.function_code = - PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ; - else - request.function_code = - PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ; - put_unaligned_le16(queue_id, - &request.data.delete_operational_queue.queue_id); - - return pqi_submit_admin_request_synchronous(ctrl_info, &request, - &response); -} - static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) { int rc; @@ -4039,7 +4034,7 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, if (rc) { dev_err(&ctrl_info->pci_dev->dev, "error creating inbound AIO queue\n"); - goto delete_inbound_queue_raid; + return rc; } queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + @@ -4067,7 +4062,7 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, if (rc) { dev_err(&ctrl_info->pci_dev->dev, "error changing queue property\n"); - goto delete_inbound_queue_aio; + return rc; } /* @@ -4097,7 +4092,7 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, if (rc) { dev_err(&ctrl_info->pci_dev->dev, "error creating outbound queue\n"); - goto delete_inbound_queue_aio; + return rc; } queue_group->oq_ci = ctrl_info->iomem_base + @@ -4106,16 +4101,6 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, &response.data.create_operational_oq.oq_ci_offset); return 0; - -delete_inbound_queue_aio: - pqi_delete_operational_queue(ctrl_info, true, - queue_group->iq_id[AIO_PATH]); - -delete_inbound_queue_raid: - 
pqi_delete_operational_queue(ctrl_info, true, - queue_group->iq_id[RAID_PATH]); - - return rc; } static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) @@ -5073,7 +5058,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) iq_pi = queue_group->iq_pi_copy[path]; while (1) { - iq_ci = *queue_group->iq_ci[path]; + iq_ci = readl(queue_group->iq_ci[path]); if (iq_ci == iq_pi) break; pqi_check_ctrl_health(ctrl_info); @@ -6262,20 +6247,20 @@ static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) admin_queues = &ctrl_info->admin_queues; admin_queues->iq_pi_copy = 0; admin_queues->oq_ci_copy = 0; - *admin_queues->oq_pi = 0; + writel(0, admin_queues->oq_pi); for (i = 0; i < ctrl_info->num_queue_groups; i++) { ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; ctrl_info->queue_groups[i].oq_ci_copy = 0; - *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; - *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; - *ctrl_info->queue_groups[i].oq_pi = 0; + writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); + writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); + writel(0, ctrl_info->queue_groups[i].oq_pi); } event_queue = &ctrl_info->event_queue; - *event_queue->oq_pi = 0; + writel(0, event_queue->oq_pi); event_queue->oq_ci_copy = 0; } @@ -6798,6 +6783,14 @@ static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) static const struct pci_device_id pqi_pci_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x105b, 0x1211) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x105b, 0x1321) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a22) }, { @@ -6818,6 +6811,50 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xf460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xf461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0045) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0046) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0047) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0048) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) }, { @@ -6918,6 +6955,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1282) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) }, { @@ -6938,6 +6979,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADVANTECH, 0x8312) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) }, { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 7777487e4026..a586866debc5 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -955,7 +955,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); if (sbi->s_mount_opt & 
EXT2_MOUNT_DAX) { - err = bdev_dax_supported(sb, blocksize); + err = ____bdev_dax_supported(sb->s_bdev, blocksize); if (err) goto failed_mount; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index edc1f1eb84d3..1839eac6af82 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3724,7 +3724,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) " that may contain inline data"); goto failed_mount; } - err = bdev_dax_supported(sb, blocksize); + err = ____bdev_dax_supported(sb->s_bdev, blocksize); if (err) goto failed_mount; } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index d5bd903c0a5f..e67864b2a145 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1102,7 +1102,8 @@ xfs_ioctl_setattr_dax_invalidate( if (fa->fsx_xflags & FS_XFLAG_DAX) { if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) return -EINVAL; - if (bdev_dax_supported(sb, sb->s_blocksize) < 0) + if (____bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)), + sb->s_blocksize) < 0) return -EINVAL; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index a960c84cde2c..8e2c43c83a34 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1184,6 +1184,30 @@ static const struct inode_operations xfs_inline_symlink_inode_operations = { .update_time = xfs_vn_update_time, }; +/* Figure out if this file actually supports DAX. */ +static bool +xfs_inode_supports_dax( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + + /* Only supported on non-reflinked files. */ + if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip)) + return false; + + /* DAX mount option or DAX iflag must be set. */ + if (!(mp->m_flags & XFS_MOUNT_DAX) && + !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) + return false; + + /* Block size must match page size */ + if (mp->m_sb.sb_blocksize != PAGE_SIZE) + return false; + + /* Device has to support DAX too. */ + return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL; +} + STATIC void xfs_diflags_to_iflags( struct inode *inode, @@ -1202,11 +1226,7 @@ xfs_diflags_to_iflags( inode->i_flags |= S_SYNC; if (flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; - if (S_ISREG(inode->i_mode) && - ip->i_mount->m_sb.sb_blocksize == PAGE_SIZE && - !xfs_is_reflink_inode(ip) && - (ip->i_mount->m_flags & XFS_MOUNT_DAX || - ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) + if (xfs_inode_supports_dax(ip)) inode->i_flags |= S_DAX; } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 6fd948774f2c..57ccf20ab8b9 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1658,9 +1658,15 @@ xfs_fs_fill_super( sb->s_flags |= MS_I_VERSION; if (mp->m_flags & XFS_MOUNT_DAX) { + int error2 = 0; - error = bdev_dax_supported(sb, sb->s_blocksize); - if (error) { + + error = ____bdev_dax_supported(mp->m_ddev_targp->bt_bdev, + sb->s_blocksize); + if (mp->m_rtdev_targp) + error2 = ____bdev_dax_supported(mp->m_rtdev_targp->bt_bdev, + sb->s_blocksize); + if (error && error2) { xfs_alert(mp, "DAX unsupported by block device. 
Turning off DAX."); mp->m_flags &= ~XFS_MOUNT_DAX; diff --git a/include/linux/ata.h b/include/linux/ata.h index 34ad4eacf013..6200af217ec7 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -448,6 +448,8 @@ enum { ATA_SET_MAX_LOCK = 0x02, ATA_SET_MAX_UNLOCK = 0x03, ATA_SET_MAX_FREEZE_LOCK = 0x04, + ATA_SET_MAX_PASSWD_DMA = 0x05, + ATA_SET_MAX_UNLOCK_DMA = 0x06, /* feature values for DEVICE CONFIGURATION OVERLAY */ ATA_DCO_RESTORE = 0xC0, diff --git a/include/linux/dax.h b/include/linux/dax.h index 7ce873fa7196..1c89b6f52b3e 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -25,43 +25,21 @@ extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) struct dax_device *dax_get_by_host(const char *host); -struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops); void put_dax(struct dax_device *dax_dev); -void kill_dax(struct dax_device *dax_dev); -void dax_write_cache(struct dax_device *dax_dev, bool wc); -bool dax_write_cache_enabled(struct dax_device *dax_dev); #else static inline struct dax_device *dax_get_by_host(const char *host) { return NULL; } -static inline struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops) -{ - /* - * Callers should check IS_ENABLED(CONFIG_DAX) to know if this - * NULL is an error or expected. - */ - return NULL; -} + static inline void put_dax(struct dax_device *dax_dev) { } -static inline void kill_dax(struct dax_device *dax_dev) -{ -} -static inline void dax_write_cache(struct dax_device *dax_dev, bool wc) -{ -} -static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) -{ - return false; -} #endif int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) +int ____bdev_dax_supported(struct block_device *bdev, int blocksize); int __bdev_dax_supported(struct super_block *sb, int blocksize); static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { @@ -80,7 +58,9 @@ static inline void fs_put_dax(struct dax_device *dax_dev) struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); #else -static inline int bdev_dax_supported(struct super_block *sb, int blocksize) +int ____bdev_dax_supported(struct block_device *bdev, int blocksize); +static inline int bdev_dax_supported(struct super_block *sb, + int blocksize) { return -EOPNOTSUPP; } @@ -102,13 +82,18 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) int dax_read_lock(void); void dax_read_unlock(int id); +struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops); bool dax_alive(struct dax_device *dax_dev); +void kill_dax(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); +void dax_write_cache(struct dax_device *dax_dev, bool wc); +bool dax_write_cache_enabled(struct dax_device *dax_dev); /* * We use lowest available bit in exceptional entry for locking, one bit for diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 59e76517934c..3c67737d65a5 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -458,7 +458,7 @@ struct nvme_ana_group_desc { __le32 nnsids; __le64 chgcnt; __u8 state; - __u8 
rsvd17[7]; + __u8 rsvd17[15]; __le32 nsids[]; }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0ac3f9c7e787..783f0ce6a54c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5839,17 +5839,19 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, return target; } -static inline int task_util(struct task_struct *p); -static int cpu_util_wake(int cpu, struct task_struct *p); +static inline unsigned long task_util(struct task_struct *p); +static unsigned long cpu_util_wake(int cpu, struct task_struct *p); static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) { - return capacity_orig_of(cpu) - cpu_util_wake(cpu, p); + return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0); } /* * find_idlest_group finds and returns the least busy CPU group within the * domain. + * + * Assumes p is allowed on at least one CPU in sd. */ static struct sched_group * find_idlest_group(struct sched_domain *sd, struct task_struct *p, @@ -5857,8 +5859,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, { struct sched_group *idlest = NULL, *group = sd->groups; struct sched_group *most_spare_sg = NULL; - unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0; - unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0; + unsigned long min_runnable_load = ULONG_MAX; + unsigned long this_runnable_load = ULONG_MAX; + unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX; unsigned long most_spare = 0, this_spare = 0; int load_idx = sd->forkexec_idx; int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; @@ -5991,10 +5994,10 @@ skip_spare: } /* - * find_idlest_cpu - find the idlest cpu among the cpus in group. + * find_idlest_group_cpu - find the idlest cpu among the cpus in group. */ static int -find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) +find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) { unsigned long load, min_load = ULONG_MAX; unsigned int min_exit_latency = UINT_MAX; @@ -6033,7 +6036,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) } } else if (shallowest_idle_cpu == -1) { load = weighted_cpuload(cpu_rq(i)); - if (load < min_load || (load == min_load && i == this_cpu)) { + if (load < min_load) { min_load = load; least_loaded_cpu = i; } @@ -6043,6 +6046,53 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return shallowest_idle_cpu != -1 ? 
shallowest_idle_cpu : least_loaded_cpu; } +static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, + int cpu, int prev_cpu, int sd_flag) +{ + int new_cpu = cpu; + + if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) + return prev_cpu; + + while (sd) { + struct sched_group *group; + struct sched_domain *tmp; + int weight; + + if (!(sd->flags & sd_flag)) { + sd = sd->child; + continue; + } + + group = find_idlest_group(sd, p, cpu, sd_flag); + if (!group) { + sd = sd->child; + continue; + } + + new_cpu = find_idlest_group_cpu(group, p, cpu); + if (new_cpu == cpu) { + /* Now try balancing at a lower domain level of cpu */ + sd = sd->child; + continue; + } + + /* Now try balancing at a lower domain level of new_cpu */ + cpu = new_cpu; + weight = sd->span_weight; + sd = NULL; + for_each_domain(cpu, tmp) { + if (weight <= tmp->span_weight) + break; + if (tmp->flags & sd_flag) + sd = tmp; + } + /* while loop will break here if sd == NULL */ + } + + return new_cpu; +} + #ifdef CONFIG_SCHED_SMT static inline void set_idle_cores(int cpu, int val) @@ -6298,7 +6348,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * capacity_orig) as it useful for predicting the capacity required after task * migrations (scheduler-driven DVFS). */ -static int cpu_util(int cpu) +static unsigned long cpu_util(int cpu) { unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg; unsigned long capacity = capacity_orig_of(cpu); @@ -6306,7 +6356,7 @@ static int cpu_util(int cpu) return (util >= capacity) ? capacity : util; } -static inline int task_util(struct task_struct *p) +static inline unsigned long task_util(struct task_struct *p) { return p->se.avg.util_avg; } @@ -6315,7 +6365,7 @@ static inline int task_util(struct task_struct *p) * cpu_util_wake: Compute cpu utilization with any contributions from * the waking task p removed. */ -static int cpu_util_wake(int cpu, struct task_struct *p) +static unsigned long cpu_util_wake(int cpu, struct task_struct *p) { unsigned long util, capacity; @@ -6426,39 +6476,8 @@ pick_cpu: if (want_affine) current->recent_used_cpu = cpu; } - } else while (sd) { - struct sched_group *group; - int weight; - - if (!(sd->flags & sd_flag)) { - sd = sd->child; - continue; - } - - group = find_idlest_group(sd, p, cpu, sd_flag); - if (!group) { - sd = sd->child; - continue; - } - - new_cpu = find_idlest_cpu(group, p, cpu); - if (new_cpu == -1 || new_cpu == cpu) { - /* Now try balancing at a lower domain level of cpu */ - sd = sd->child; - continue; - } - - /* Now try balancing at a lower domain level of new_cpu */ - cpu = new_cpu; - weight = sd->span_weight; - sd = NULL; - for_each_domain(cpu, tmp) { - if (weight <= tmp->span_weight) - break; - if (tmp->flags & sd_flag) - sd = tmp; - } - /* while loop will break here if sd == NULL */ + } else { + new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); } rcu_read_unlock(); @@ -9118,6 +9137,10 @@ void nohz_balance_enter_idle(int cpu) if (!cpu_active(cpu)) return; + /* Spare idle load balancing on CPUs that don't want to be disturbed: */ + if (!is_housekeeping_cpu(cpu)) + return; + if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) return; |
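
The lpfc hunks above move the per-node cmd_pending accounting out of the submit and completion paths and into buffer get/release, pairing each increment with an LPFC_BUMP_QDEPTH / LPFC_SBUF_BUMP_QDEPTH flag on the buffer so the release side decrements only when the get side actually bumped the counter. Below is a minimal standalone C sketch of that pairing pattern, under the assumption that accounting is wanted whenever a queue depth limit is configured; the names (node, io_buf, get_buf, release_buf, node_check_qdepth) are illustrative stand-ins, not the driver's real structures or helpers.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	#define BUF_BUMPED_QDEPTH 0x2	/* stands in for LPFC_BUMP_QDEPTH / LPFC_SBUF_BUMP_QDEPTH */

	struct node {			/* stand-in for struct lpfc_nodelist */
		atomic_int cmd_pending;
		int cmd_qdepth;
	};

	struct io_buf {			/* stand-in for lpfc_nvme_buf / lpfc_scsi_buf */
		struct node *ndlp;
		unsigned int flags;
	};

	/* Stand-in for lpfc_ndlp_check_qdepth(): assume accounting is
	 * wanted whenever the node has a queue depth limit configured. */
	static bool node_check_qdepth(const struct node *n)
	{
		return n && n->cmd_qdepth > 0;
	}

	/* Get path: bump the counter only when a buffer was really
	 * obtained, and record that fact on the buffer itself. */
	static struct io_buf *get_buf(struct node *n)
	{
		struct io_buf *buf = calloc(1, sizeof(*buf));

		if (!buf)
			return NULL;
		buf->ndlp = n;
		if (node_check_qdepth(n)) {
			atomic_fetch_add(&n->cmd_pending, 1);
			buf->flags |= BUF_BUMPED_QDEPTH;
		}
		return buf;
	}

	/* Release path: decrement exactly once, and only if the get
	 * path incremented; every exit can call this unconditionally. */
	static void release_buf(struct io_buf *buf)
	{
		if ((buf->flags & BUF_BUMPED_QDEPTH) && buf->ndlp)
			atomic_fetch_sub(&buf->ndlp->cmd_pending, 1);
		buf->flags &= ~BUF_BUMPED_QDEPTH;
		buf->ndlp = NULL;
		free(buf);
	}

Carrying the flag on the buffer, rather than relying on call-site symmetry, is what lets the patch delete the scattered atomic_dec() calls in lpfc_queuecommand, lpfc_nvme_fcp_io_submit, and lpfc_scsi_cmd_iocb_cmpl: every exit path funnels through the release function, which undoes exactly what the get path did and nothing more.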