
Commit d3658c22 authored by Linus Torvalds

Merge tag 'ntb-4.16' of git://github.com/jonmason/ntb

Pull NTB updates from Jon Mason:
 "Bug fixes galore, removal of the ntb atom driver, and updates to the
  ntb tools and tests to support the multi-port interface"

* tag 'ntb-4.16' of git://github.com/jonmason/ntb: (37 commits)
  NTB: ntb_perf: fix cast to restricted __le32
  ntb_perf: Fix an error code in perf_copy_chunk()
  ntb_hw_switchtec: Make function switchtec_ntb_remove() static
  NTB: ntb_tool: fix memory leak on 'buf' on error exit path
  NTB: ntb_perf: fix printing of resource_size_t
  NTB: ntb_hw_idt: Set NTB_TOPO_SWITCH topology
  NTB: ntb_test: Update ntb_perf tests
  NTB: ntb_test: Update ntb_tool MW tests
  NTB: ntb_test: Add ntb_tool Message tests
  NTB: ntb_test: Update ntb_tool Scratchpad tests
  NTB: ntb_test: Update ntb_tool DB tests
  NTB: ntb_test: Update ntb_tool link tests
  NTB: ntb_test: Add ntb_tool port tests
  NTB: ntb_test: Safely use paths with whitespace
  NTB: ntb_perf: Add full multi-port NTB API support
  NTB: ntb_tool: Add full multi-port NTB API support
  NTB: ntb_pp: Add full multi-port NTB API support
  NTB: Fix UB/bug in ntb_mw_get_align()
  NTB: Set dma mask and dma coherent mask to NTB devices
  NTB: Rename NTB messaging API methods
  ...
parents 8ac4840a 3b28c987
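Two of the headline changes merged here are the multi-port port-enumeration API and the renamed messaging calls (ntb_msg_write() becoming ntb_peer_msg_write(), ntb_msg_read() returning the data directly). A minimal client-side sketch of how the merged API fits together; the helper names are from include/linux/ntb.h as of this merge, but the demo function itself is hypothetical:

#include <linux/ntb.h>
#include <linux/printk.h>

/* Hypothetical client: enumerate peer ports, send a message to each,
 * then read one back. Peers are addressed by port index (pidx). */
static void demo_multiport_msg(struct ntb_dev *ntb)
{
	int pidx, src;
	u32 data;

	/* Renamed from ntb_msg_write(): the peer index now comes first. */
	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
		ntb_peer_msg_write(ntb, pidx, 0, 0x1);

	/* ntb_msg_read() now returns the payload; the sending peer's
	 * index comes back through @src instead of a status code. */
	data = ntb_msg_read(ntb, &src, 0);
	pr_info("msg 0x%x from peer %d\n", data, src);
}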
@@ -9801,7 +9801,7 @@ F: drivers/ntb/hw/amd/
NTB DRIVER CORE
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
M: Allen Hubbe <Allen.Hubbe@emc.com>
M: Allen Hubbe <allenbh@gmail.com>
L: linux-ntb@googlegroups.com
S: Supported
W: https://github.com/jonmason/ntb/wiki
......
@@ -1020,6 +1020,10 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (rc)
goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
......
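This hunk, and its twins in the IDT and Intel drivers further down, all add the same step: after picking a DMA mask for the PCI function, mirror it onto the logical NTB device, since NTB clients run their DMA mappings against &ntb->dev rather than the PCI device. A condensed sketch of the whole ladder, assuming the usual pdev/ndev pairing (the helper name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer 64-bit DMA, fall back to 32-bit, then propagate the
 * resulting mask to the child NTB device. */
static int example_ntb_init_dma(struct pci_dev *pdev, struct device *ntb_dev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	/* Clients map DMA against the NTB device, so it must advertise
	 * the same capability as the PCI function backing it. */
	return dma_coerce_mask_and_coherent(ntb_dev, dma_get_mask(&pdev->dev));
}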
@@ -1744,20 +1744,19 @@ static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
* idt_ntb_msg_read() - read message register with specified index
* (NTB API callback)
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: OUT - Port index of peer device a message retrieved from
* @msg: OUT - Data
* @midx: Message register index
*
* Read data from the specified message register and source register.
*
* Return: zero on success, negative error if invalid argument passed.
* Return: inbound message register value.
*/
static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
static u32 idt_ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (midx < 0 || IDT_MSG_CNT <= midx)
return -EINVAL;
return ~(u32)0;
/* Retrieve source port index of the message */
if (pidx != NULL) {
@@ -1772,18 +1771,15 @@ static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
}
/* Retrieve data of the corresponding message register */
if (msg != NULL)
*msg = idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
return 0;
return idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
}
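One nuance of the new return convention, sketched with a hypothetical caller: a bad @midx is now reported by returning ~(u32)0 rather than -EINVAL, so a caller that must distinguish an all-ones payload from an error should range-check the index itself.

	/* Hypothetical caller-side check; IDT_MSG_CNT bounds the index. */
	if (midx >= 0 && midx < IDT_MSG_CNT)
		data = idt_ntb_msg_read(&ndev->ntb, &src, midx);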
/*
* idt_ntb_msg_write() - write data to the specified message register
* (NTB API callback)
* idt_ntb_peer_msg_write() - write data to the specified message register
* (NTB API callback)
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: Port index of peer device a message being sent to
* @midx: Message register index
* @msg: Data to send
*
* Just try to send data to a peer. Message status register should be
@@ -1791,7 +1787,8 @@ static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, u32 msg)
static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
u32 msg)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
unsigned long irqflags;
@@ -2058,7 +2055,7 @@ static const struct ntb_dev_ops idt_ntb_ops = {
.msg_set_mask = idt_ntb_msg_set_mask,
.msg_clear_mask = idt_ntb_msg_clear_mask,
.msg_read = idt_ntb_msg_read,
.msg_write = idt_ntb_msg_write
.peer_msg_write = idt_ntb_peer_msg_write
};
/*
@@ -2073,7 +2070,7 @@ static int idt_register_device(struct idt_ntb_dev *ndev)
/* Initialize the rest of NTB device structure and register it */
ndev->ntb.ops = &idt_ntb_ops;
ndev->ntb.topo = NTB_TOPO_PRI;
ndev->ntb.topo = NTB_TOPO_SWITCH;
ret = ntb_register_device(&ndev->ntb);
if (ret != 0) {
@@ -2269,7 +2266,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"Message data:\n");
for (idx = 0; idx < IDT_MSG_CNT; idx++) {
int src;
(void)idt_ntb_msg_read(&ndev->ntb, idx, &src, &data);
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
"\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
@@ -2429,7 +2426,7 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
struct pci_dev *pdev = ndev->ntb.pdev;
int ret;
/* Initialize the bit mask of DMA */
/* Initialize the bit mask of PCI/NTB DMA */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret != 0) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -2450,6 +2447,12 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
dev_warn(&pdev->dev,
"Cannot set consistent DMA highmem bit mask\n");
}
ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set NTB device DMA bit mask\n");
return ret;
}
/*
* Enable the device advanced error reporting. It's not critical to
......
@@ -74,12 +74,6 @@ MODULE_AUTHOR("Intel Corporation");
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
@@ -184,15 +178,6 @@ static inline void _iowrite64(u64 val, void __iomem *mmio)
#endif
#endif
static inline int pdev_is_atom(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
return 1;
}
return 0;
}
static inline int pdev_is_xeon(struct pci_dev *pdev)
{
switch (pdev->device) {
@@ -1006,8 +991,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
{
struct intel_ntb_dev *ndev = filp->private_data;
if (pdev_is_xeon(ndev->ntb.pdev) ||
pdev_is_atom(ndev->ntb.pdev))
if (pdev_is_xeon(ndev->ntb.pdev))
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_skx_xeon(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
@@ -1439,242 +1423,6 @@ static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
ndev->peer_reg->spad);
}
/* ATOM */
static u64 atom_db_ioread(void __iomem *mmio)
{
return ioread64(mmio);
}
static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
iowrite64(bits, mmio);
}
static int atom_poll_link(struct intel_ntb_dev *ndev)
{
u32 ntb_ctl;
ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
if (ntb_ctl == ndev->ntb_ctl)
return 0;
ndev->ntb_ctl = ntb_ctl;
ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
return 1;
}
static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}
static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
& ATOM_LTSSMSTATEJMP_FORCEDETECT)
return 1;
if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
& ATOM_IBIST_ERR_OFLOW)
return 1;
return 0;
}
static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
struct device *dev = &ndev->ntb.pdev->dev;
switch (ppd & ATOM_PPD_TOPO_MASK) {
case ATOM_PPD_TOPO_B2B_USD:
dev_dbg(dev, "PPD %d B2B USD\n", ppd);
return NTB_TOPO_B2B_USD;
case ATOM_PPD_TOPO_B2B_DSD:
dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
return NTB_TOPO_B2B_DSD;
case ATOM_PPD_TOPO_PRI_USD:
case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
case ATOM_PPD_TOPO_SEC_USD:
case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
return NTB_TOPO_NONE;
}
dev_dbg(dev, "PPD %d invalid\n", ppd);
return NTB_TOPO_NONE;
}
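A worked decode of the above, using the ATOM_PPD_* mask values defined near the end of this diff (ATOM_PPD_TOPO_MASK = 0x0300 | 0x1000 = 0x1300):

	/* ppd = 0x0100 -> CONN_B2B | DEV_USD -> NTB_TOPO_B2B_USD
	 * ppd = 0x1100 -> CONN_B2B | DEV_DSD -> NTB_TOPO_B2B_DSD
	 * ppd = 0x0200 -> CONN_RP  | DEV_USD -> non-B2B, NTB_TOPO_NONE
	 */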
static void atom_link_hb(struct work_struct *work)
{
struct intel_ntb_dev *ndev = hb_ndev(work);
struct device *dev = &ndev->ntb.pdev->dev;
unsigned long poll_ts;
void __iomem *mmio;
u32 status32;
poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
/* Delay polling the link status if an interrupt was received,
* unless the cached link status says the link is down.
*/
if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
return;
}
if (atom_poll_link(ndev))
ntb_link_event(&ndev->ntb);
if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
return;
}
/* Link is down with error: recover the link! */
mmio = ndev->self_mmio;
/* Driver resets the NTB ModPhy lanes - magic! */
iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
/* Driver waits 100ms to allow the NTB ModPhy to settle */
msleep(100);
/* Clear AER Errors, write to clear */
status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
dev_dbg(dev, "ERRCORSTS = %x\n", status32);
status32 &= PCI_ERR_COR_REP_ROLL;
iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
/* Clear DeSkew Buffer error, write to clear */
status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
dev_dbg(dev, "DESKEWSTS = %x\n", status32);
status32 |= ATOM_DESKEWSTS_DBERR;
iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
status32 &= ATOM_IBIST_ERR_OFLOW;
iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
/* Releases the NTB state machine to allow the link to retrain */
status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
/* There is a potential race between the 2 NTB devices recovering at the
* same time. If the times are the same, the link will not recover and
* the driver will be stuck in this loop forever. Add a random interval
* to the recovery time to prevent this race.
*/
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
+ prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}
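The randomized re-arm at the end of atom_link_hb() is a generic de-synchronization trick worth noting: if both sides of the link reset their ModPhy at the same instant, they can keep missing each other indefinitely, and the jitter breaks that symmetry. A standalone sketch of the same idea (the helper name is hypothetical):

#include <linux/random.h>
#include <linux/workqueue.h>

/* Re-arm a recovery worker with random jitter so two peers that fail
 * in lockstep do not also retry in lockstep forever. */
static void rearm_with_jitter(struct delayed_work *dwork,
			      unsigned long base_jiffies)
{
	schedule_delayed_work(dwork, base_jiffies +
			      prandom_u32() % base_jiffies);
}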
static int atom_init_isr(struct intel_ntb_dev *ndev)
{
int rc;
rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
if (rc)
return rc;
/* ATOM doesn't have link status interrupt, poll on that platform */
ndev->last_ts = jiffies;
INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
return 0;
}
static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
cancel_delayed_work_sync(&ndev->hb_timer);
ndev_deinit_isr(ndev);
}
static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
ndev->mw_count = ATOM_MW_COUNT;
ndev->spad_count = ATOM_SPAD_COUNT;
ndev->db_count = ATOM_DB_COUNT;
switch (ndev->ntb.topo) {
case NTB_TOPO_B2B_USD:
case NTB_TOPO_B2B_DSD:
ndev->self_reg = &atom_pri_reg;
ndev->peer_reg = &atom_b2b_reg;
ndev->xlat_reg = &atom_sec_xlat;
/* Enable Bus Master and Memory Space on the secondary side */
iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
ndev->self_mmio + ATOM_SPCICMD_OFFSET);
break;
default:
return -EINVAL;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
return 0;
}
static int atom_init_dev(struct intel_ntb_dev *ndev)
{
u32 ppd;
int rc;
rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
if (rc)
return -EIO;
ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
rc = atom_init_ntb(ndev);
if (rc)
return rc;
rc = atom_init_isr(ndev);
if (rc)
return rc;
if (ndev->ntb.topo != NTB_TOPO_SEC) {
/* Initiate PCI-E link training */
rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
ppd | ATOM_PPD_INIT_LINK);
if (rc)
return rc;
}
return 0;
}
static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
atom_deinit_isr(ndev);
}
/* Skylake Xeon NTB */
static int skx_poll_link(struct intel_ntb_dev *ndev)
@@ -2586,6 +2334,10 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (rc)
goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
@@ -2658,24 +2410,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
node = dev_to_node(&pdev->dev);
if (pdev_is_atom(pdev)) {
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
goto err_ndev;
}
ndev_init_struct(ndev, pdev);
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = atom_init_dev(ndev);
if (rc)
goto err_init_dev;
} else if (pdev_is_xeon(pdev)) {
if (pdev_is_xeon(pdev)) {
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
@@ -2731,9 +2466,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -2749,41 +2482,12 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
}
static const struct intel_ntb_reg atom_reg = {
.poll_link = atom_poll_link,
.link_is_up = atom_link_is_up,
.db_ioread = atom_db_ioread,
.db_iowrite = atom_db_iowrite,
.db_size = sizeof(u64),
.ntb_ctl = ATOM_NTBCNTL_OFFSET,
.mw_bar = {2, 4},
};
static const struct intel_ntb_alt_reg atom_pri_reg = {
.db_bell = ATOM_PDOORBELL_OFFSET,
.db_mask = ATOM_PDBMSK_OFFSET,
.spad = ATOM_SPAD_OFFSET,
};
static const struct intel_ntb_alt_reg atom_b2b_reg = {
.db_bell = ATOM_B2B_DOORBELL_OFFSET,
.spad = ATOM_B2B_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg atom_sec_xlat = {
/* FIXME : .bar0_base = ATOM_SBAR0BASE_OFFSET, */
/* FIXME : .bar2_limit = ATOM_SBAR2LMT_OFFSET, */
.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
};
static const struct intel_ntb_reg xeon_reg = {
.poll_link = xeon_poll_link,
.link_is_up = xeon_link_is_up,
@@ -2940,7 +2644,6 @@ static const struct file_operations intel_ntb_debugfs_info = {
};
static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
......
@@ -66,7 +66,6 @@
#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
@@ -196,63 +195,6 @@
#define SKX_DB_TOTAL_SHIFT 33
#define SKX_SPAD_COUNT 16
/* Intel Atom hardware */
#define ATOM_SBAR2XLAT_OFFSET 0x0008
#define ATOM_PDOORBELL_OFFSET 0x0020
#define ATOM_PDBMSK_OFFSET 0x0028
#define ATOM_NTBCNTL_OFFSET 0x0060
#define ATOM_SPAD_OFFSET 0x0080
#define ATOM_PPD_OFFSET 0x00d4
#define ATOM_PBAR2XLAT_OFFSET 0x8008
#define ATOM_B2B_DOORBELL_OFFSET 0x8020
#define ATOM_B2B_SPAD_OFFSET 0x8080
#define ATOM_SPCICMD_OFFSET 0xb004
#define ATOM_LINK_STATUS_OFFSET 0xb052
#define ATOM_ERRCORSTS_OFFSET 0xb110
#define ATOM_IP_BASE 0xc000
#define ATOM_DESKEWSTS_OFFSET (ATOM_IP_BASE + 0x3024)
#define ATOM_LTSSMERRSTS0_OFFSET (ATOM_IP_BASE + 0x3180)
#define ATOM_LTSSMSTATEJMP_OFFSET (ATOM_IP_BASE + 0x3040)
#define ATOM_IBSTERRRCRVSTS0_OFFSET (ATOM_IP_BASE + 0x3324)
#define ATOM_MODPHY_PCSREG4 0x1c004
#define ATOM_MODPHY_PCSREG6 0x1c006
#define ATOM_PPD_INIT_LINK 0x0008
#define ATOM_PPD_CONN_MASK 0x0300
#define ATOM_PPD_CONN_TRANSPARENT 0x0000
#define ATOM_PPD_CONN_B2B 0x0100
#define ATOM_PPD_CONN_RP 0x0200
#define ATOM_PPD_DEV_MASK 0x1000
#define ATOM_PPD_DEV_USD 0x0000
#define ATOM_PPD_DEV_DSD 0x1000
#define ATOM_PPD_TOPO_MASK (ATOM_PPD_CONN_MASK | ATOM_PPD_DEV_MASK)
#define ATOM_PPD_TOPO_PRI_USD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_PRI_DSD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_DSD)
#define ATOM_PPD_TOPO_SEC_USD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_SEC_DSD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_DSD)
#define ATOM_PPD_TOPO_B2B_USD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_B2B_DSD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_DSD)
#define ATOM_MW_COUNT 2
#define ATOM_DB_COUNT 34
#define ATOM_DB_VALID_MASK (BIT_ULL(ATOM_DB_COUNT) - 1)
#define ATOM_DB_MSIX_VECTOR_COUNT 34
#define ATOM_DB_MSIX_VECTOR_SHIFT 1
#define ATOM_DB_TOTAL_SHIFT 34
#define ATOM_SPAD_COUNT 16
#define ATOM_NTB_CTL_DOWN_BIT BIT(16)
#define ATOM_NTB_CTL_ACTIVE(x) !(x & ATOM_NTB_CTL_DOWN_BIT)
#define ATOM_DESKEWSTS_DBERR BIT(15)
#define ATOM_LTSSMERRSTS0_UNEXPECTEDEI BIT(20)
#define ATOM_LTSSMSTATEJMP_FORCEDETECT BIT(2)
#define ATOM_IBIST_ERR_OFLOW 0x7FFF7FFF
#define ATOM_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
#define ATOM_LINK_RECOVERY_TIME msecs_to_jiffies(500)
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
......
@@ -63,12 +63,11 @@
#define DRIVER_NAME "ntb"
#define DRIVER_DESCRIPTION "PCIe NTB Driver Framework"
#define DRIVER_LICENSE "Dual BSD/GPL"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "24 March 2015"
#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
@@ -112,7 +111,6 @@ int ntb_register_device(struct ntb_dev *ntb)
init_completion(&ntb->released);
memset(&ntb->dev, 0, sizeof(ntb->dev));
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
......
@@ -1003,6 +1003,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
mw_base = nt->mw_vec[mw_num].phys_addr;
mw_size = nt->mw_vec[mw_num].phys_size;
if (max_mw_size && mw_size > max_mw_size)
mw_size = max_mw_size;
tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * (qp_num / mw_count);
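For a concrete feel of this partitioning (illustrative numbers, assuming the usual qp-to-MW striping where queue qp_num uses window qp_num % mw_count):

	/* mw_count = 2, qp_count = 8 -> num_qps_mw = 4 queues per window
	 * mw_size  = 16 MiB          -> tx_size = 16 MiB / 4 = 4 MiB each
	 * qp_num   = 5 -> window 5 % 2 = 1, offset 4 MiB * (5 / 2) = 8 MiB
	 */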