Commit 01e0d603 authored by Linus Torvalds

Merge tag 'ntb-4.10' of git://github.com/jonmason/ntb

Pull NTB update from Jon Mason:

 - NTB bug fixes for removing an unnecessary call to ntb_peer_spad_read,
   and correcting a free_irq inconsistency

 - add Intel SKX support

 - change the AMD NTB maintainer, and fix some bugs present there

* tag 'ntb-4.10' of git://github.com/jonmason/ntb:
  ntb_transport: Remove unnecessary call to ntb_peer_spad_read
  NTB: Fix 'request_irq()' and 'free_irq()' inconsistency
  ntb: fix SKX NTB config space size register offsets
  NTB: correct ntb_peer_spad_read for case when callback is not supplied.
  MAINTAINERS: Change in maintainer for AMD NTB
  ntb_transport: Limit memory windows based on available, scratchpads
  NTB: Register and offset values fix for memory window
  NTB: add support for hotplug feature
  ntb: Adding Skylake Xeon NTB support
@@ -8828,7 +8828,7 @@ T:	git git://github.com/jonmason/ntb.git
 F:	drivers/ntb/hw/intel/
 
 NTB AMD DRIVER
-M:	Xiangliang Yu <Xiangliang.Yu@amd.com>
+M:	Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 L:	linux-ntb@googlegroups.com
 S:	Supported
 F:	drivers/ntb/hw/amd/
...
@@ -138,11 +138,11 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
 	base_addr = pci_resource_start(ndev->ntb.pdev, bar);
 
 	if (bar != 1) {
-		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 3);
-		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 3);
+		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
+		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);
 
 		/* Set the limit if supported */
-		limit = base_addr + size;
+		limit = size;
 
 		/* set and verify setting the translation address */
 		write64(addr, peer_mmio + xlat_reg);
@@ -164,14 +164,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
 		xlat_reg = AMD_BAR1XLAT_OFFSET;
 		limit_reg = AMD_BAR1LMT_OFFSET;
 
-		/* split bar addr range must all be 32 bit */
-		if (addr & (~0ull << 32))
-			return -EINVAL;
-		if ((addr + size) & (~0ull << 32))
-			return -EINVAL;
-
 		/* Set the limit if supported */
-		limit = base_addr + size;
+		limit = size;
 
 		/* set and verify setting the translation address */
 		write64(addr, peer_mmio + xlat_reg);
@@ -199,6 +193,11 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
 	if (!ndev->peer_sta)
 		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
 
+	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
+		ndev->peer_sta = 0;
+		return 1;
+	}
+
 	/* If peer_sta is reset or D0 event, the ISR has
 	 * started a timer to check link status of hardware.
 	 * So here just clear status bit. And if peer_sta is
@@ -207,7 +206,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
 	 */
 	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
 		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
-	else if (ndev->peer_sta & AMD_PEER_D0_EVENT)
+	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
 		ndev->peer_sta = 0;
 
 	return 0;
@@ -491,6 +490,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
 		break;
 	case AMD_PEER_D3_EVENT:
 	case AMD_PEER_PMETO_EVENT:
+	case AMD_LINK_UP_EVENT:
+	case AMD_LINK_DOWN_EVENT:
 		amd_ack_smu(ndev, status);
 
 		/* link down */
@@ -598,7 +599,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 
 err_msix_request:
 	while (i-- > 0)
-		free_irq(ndev->msix[i].vector, ndev);
+		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
 	pci_disable_msix(pdev);
 err_msix_enable:
 	kfree(ndev->msix);
...
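Note on the free_irq() change above: free_irq() must be passed the same dev_id cookie that was handed to request_irq(), otherwise the kernel cannot find the matching irqaction and the vector stays registered. A minimal sketch of the matched request/unwind pattern inside the driver's ISR setup path; the per-vector handler name ndev_vec_isr is assumed here for illustration:

	/* request each MSI-X vector with &ndev->vec[i] as the dev_id cookie */
	for (i = 0; i < msix_count; i++) {
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}
	return 0;

err_msix_request:
	/* the unwind path must free with the same cookie, not with ndev */
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);

If the two cookies differ, free_irq() warns and leaves the interrupt requested, which is exactly the inconsistency the fix removes.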
@@ -148,9 +148,12 @@ enum {
 	AMD_PEER_D3_EVENT	= BIT(2),
 	AMD_PEER_PMETO_EVENT	= BIT(3),
 	AMD_PEER_D0_EVENT	= BIT(4),
+	AMD_LINK_UP_EVENT	= BIT(5),
+	AMD_LINK_DOWN_EVENT	= BIT(6),
 	AMD_EVENT_INTMASK	= (AMD_PEER_FLUSH_EVENT |
 				AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT |
-				AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT),
+				AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT |
+				AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT),
 
 	AMD_PMESTAT_OFFSET	= 0x480,
 	AMD_PMSGTRIG_OFFSET	= 0x490,
...
This diff is collapsed.
@@ -70,6 +70,7 @@
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX	0x6F0D
 #define PCI_DEVICE_ID_INTEL_NTB_PS_BDX	0x6F0E
 #define PCI_DEVICE_ID_INTEL_NTB_SS_BDX	0x6F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX	0x201C
 
 /* Intel Xeon hardware */
@@ -150,6 +151,51 @@
 #define XEON_DB_TOTAL_SHIFT		16
 #define XEON_SPAD_COUNT			16
 
+/* Intel Skylake Xeon hardware */
+#define SKX_IMBAR1SZ_OFFSET		0x00d0
+#define SKX_IMBAR2SZ_OFFSET		0x00d1
+#define SKX_EMBAR1SZ_OFFSET		0x00d2
+#define SKX_EMBAR2SZ_OFFSET		0x00d3
+#define SKX_DEVCTRL_OFFSET		0x0098
+#define SKX_DEVSTS_OFFSET		0x009a
+#define SKX_UNCERRSTS_OFFSET		0x014c
+#define SKX_CORERRSTS_OFFSET		0x0158
+#define SKX_LINK_STATUS_OFFSET		0x01a2
+
+#define SKX_NTBCNTL_OFFSET		0x0000
+#define SKX_IMBAR1XBASE_OFFSET		0x0010		/* SBAR2XLAT */
+#define SKX_IMBAR1XLMT_OFFSET		0x0018		/* SBAR2LMT */
+#define SKX_IMBAR2XBASE_OFFSET		0x0020		/* SBAR4XLAT */
+#define SKX_IMBAR2XLMT_OFFSET		0x0028		/* SBAR4LMT */
+#define SKX_IM_INT_STATUS_OFFSET	0x0040
+#define SKX_IM_INT_DISABLE_OFFSET	0x0048
+#define SKX_IM_SPAD_OFFSET		0x0080		/* SPAD */
+#define SKX_USMEMMISS_OFFSET		0x0070
+#define SKX_INTVEC_OFFSET		0x00d0
+#define SKX_IM_DOORBELL_OFFSET		0x0100		/* SDOORBELL0 */
+#define SKX_B2B_SPAD_OFFSET		0x0180		/* B2B SPAD */
+#define SKX_EMBAR0XBASE_OFFSET		0x4008		/* B2B_XLAT */
+#define SKX_EMBAR1XBASE_OFFSET		0x4010		/* PBAR2XLAT */
+#define SKX_EMBAR1XLMT_OFFSET		0x4018		/* PBAR2LMT */
+#define SKX_EMBAR2XBASE_OFFSET		0x4020		/* PBAR4XLAT */
+#define SKX_EMBAR2XLMT_OFFSET		0x4028		/* PBAR4LMT */
+#define SKX_EM_INT_STATUS_OFFSET	0x4040
+#define SKX_EM_INT_DISABLE_OFFSET	0x4048
+#define SKX_EM_SPAD_OFFSET		0x4080		/* remote SPAD */
+#define SKX_EM_DOORBELL_OFFSET		0x4100		/* PDOORBELL0 */
+#define SKX_SPCICMD_OFFSET		0x4504		/* SPCICMD */
+#define SKX_EMBAR0_OFFSET		0x4510		/* SBAR0BASE */
+#define SKX_EMBAR1_OFFSET		0x4518		/* SBAR23BASE */
+#define SKX_EMBAR2_OFFSET		0x4520		/* SBAR45BASE */
+
+#define SKX_DB_COUNT			32
+#define SKX_DB_LINK			32
+#define SKX_DB_LINK_BIT			BIT_ULL(SKX_DB_LINK)
+#define SKX_DB_MSIX_VECTOR_COUNT	33
+#define SKX_DB_MSIX_VECTOR_SHIFT	1
+#define SKX_DB_TOTAL_SHIFT		33
+#define SKX_SPAD_COUNT			16
+
 /* Intel Atom hardware */
 #define ATOM_SBAR2XLAT_OFFSET		0x0008
@@ -240,6 +286,7 @@
 #define NTB_HWERR_SDOORBELL_LOCKUP	BIT_ULL(0)
 #define NTB_HWERR_SB01BASE_LOCKUP	BIT_ULL(1)
 #define NTB_HWERR_B2BDOORBELL_BIT14	BIT_ULL(2)
+#define NTB_HWERR_MSIX_VECTOR32_BAD	BIT_ULL(3)
 
 /* flags to indicate unsafe api */
 #define NTB_UNSAFE_DB			BIT_ULL(0)
@@ -263,6 +310,7 @@ struct intel_ntb_reg {
 struct intel_ntb_alt_reg {
 	unsigned long db_bell;
 	unsigned long db_mask;
+	unsigned long db_clear;
 	unsigned long spad;
 };
...
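Note on the new device ID above: PCI_DEVICE_ID_INTEL_NTB_B2B_SKX only takes effect once it is matched in the driver's PCI ID table (the Skylake probe paths live in the collapsed ntb_hw_intel.c diff). A minimal sketch of that wiring; the table name intel_ntb_pci_tbl is assumed here, following the existing Xeon entries:

	static const struct pci_device_id intel_ntb_pci_tbl[] = {
		/* ... existing Atom and Xeon device IDs ... */
		{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX) },
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);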
@@ -66,6 +66,7 @@
 #define NTB_TRANSPORT_VER	"4"
 #define NTB_TRANSPORT_NAME	"ntb_transport"
 #define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
+#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
 
 MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
 MODULE_VERSION(NTB_TRANSPORT_VER);
@@ -242,9 +243,6 @@ enum {
 	NUM_MWS,
 	MW0_SZ_HIGH,
 	MW0_SZ_LOW,
-	MW1_SZ_HIGH,
-	MW1_SZ_LOW,
-	MAX_SPAD,
 };
 
 #define dev_client_dev(__dev) \
@@ -811,7 +809,7 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
 {
 	struct ntb_transport_qp *qp;
 	u64 qp_bitmap_alloc;
-	int i;
+	unsigned int i, count;
 
 	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
@@ -831,7 +829,8 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
 	 * goes down, blast them now to give them a sane value the next
 	 * time they are accessed
 	 */
-	for (i = 0; i < MAX_SPAD; i++)
+	count = ntb_spad_count(nt->ndev);
+	for (i = 0; i < count; i++)
 		ntb_spad_write(nt->ndev, i, 0);
 }
 
@@ -960,7 +959,6 @@ static void ntb_qp_link_work(struct work_struct *work)
 	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
 
 	/* query remote spad for qp ready bits */
-	ntb_peer_spad_read(nt->ndev, QP_LINKS);
 	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
 
 	/* See if the remote side is up */
@@ -1064,17 +1062,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 {
 	struct ntb_transport_ctx *nt;
 	struct ntb_transport_mw *mw;
-	unsigned int mw_count, qp_count;
+	unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
 	u64 qp_bitmap;
 	int node;
 	int rc, i;
 
 	mw_count = ntb_mw_count(ndev);
-	if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
-		dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
-			NTB_TRANSPORT_NAME);
-		return -EIO;
-	}
 
 	if (ntb_db_is_unsafe(ndev))
 		dev_dbg(&ndev->dev,
@@ -1090,8 +1083,18 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		return -ENOMEM;
 
 	nt->ndev = ndev;
+	spad_count = ntb_spad_count(ndev);
+
+	/* Limit the MW's based on the availability of scratchpads */
+
+	if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
+		nt->mw_count = 0;
+		rc = -EINVAL;
+		goto err;
+	}
 
-	nt->mw_count = mw_count;
+	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
+	nt->mw_count = min(mw_count, max_mw_count_for_spads);
 
 	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
 			GFP_KERNEL, node);
...
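Note on the scratchpad-based limit above: the transport consumes the fixed scratchpads up to MW0_SZ_HIGH plus one high/low size pair per memory window, which is where NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2) and the division by two come from. A worked sketch of the arithmetic, assuming MW0_SZ_HIGH evaluates to 4 (i.e. it follows VERSION, QP_LINKS, NUM_QPS and NUM_MWS in the enum):

	/* e.g. a device exposing 16 scratchpads and 3 hardware memory windows */
	spad_count = 16;
	mw_count = 3;

	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;	/* (16 - 4) / 2 = 6 */
	nt->mw_count = min(mw_count, max_mw_count_for_spads);		/* all 3 windows stay usable */

	/* with only 6 scratchpads, (6 - 4) / 2 = 1 window would remain */

Rather than failing the probe outright as the old -EIO check did, the driver now uses as many windows as the scratchpad budget allows and only errors out below the one-window minimum.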
@@ -968,6 +968,9 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
  */
 static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
 {
+	if (!ntb->ops->peer_spad_read)
+		return 0;
+
 	return ntb->ops->peer_spad_read(ntb, idx);
 }
...
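Note on the ntb.h guard above: a hardware driver that leaves ops->peer_spad_read unset no longer causes a NULL dereference when a client calls the helper; the read simply yields 0. A minimal caller sketch:

	u32 val;

	/* safe even if the backend does not implement peer_spad_read */
	val = ntb_peer_spad_read(ntb, 0);

Returning 0 keeps the helper's u32 return type unchanged; a caller that must distinguish "unsupported" from a genuine zero value still has to check ntb->ops->peer_spad_read itself.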