Commit 0058b0a5 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) BPF sample build fixes from Björn Töpel

 2) Fix powerpc bpf tail call implementation, from Eric Dumazet.

 3) DCCP leaks jiffies on the wire, fix also from Eric Dumazet.

 4) Fix crash in ebtables when using dnat target, from Florian Westphal.

 5) Fix port disable handling when removing bcm_sf2 driver, from Florian
    Fainelli.

 6) Fix kTLS sk_msg trim on fallback to copy mode, from Jakub Kicinski.

 7) Various KCSAN fixes all over the networking code, from Eric Dumazet.

 8) Fix memory leaks in mlx5 driver, from Alex Vesker.

 9) SMC interface refcounting fix, from Ursula Braun.

10) TSO descriptor handling fixes in stmmac driver, from Jose Abreu.

11) Add a TX lock to synchronize the kTLS TX path properly with crypto
    operations. From Jakub Kicinski.

12) Sock refcount during shutdown fix in vsock/virtio code, from Stefano
    Garzarella.

13) Fix infinite loop in Intel ice driver, from Colin Ian King.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (108 commits)
  ixgbe: need_wakeup flag might not be set for Tx
  i40e: need_wakeup flag might not be set for Tx
  igb/igc: use ktime accessors for skb->tstamp
  i40e: Fix for ethtool -m issue on X722 NIC
  iavf: initialize ITRN registers with correct values
  ice: fix potential infinite loop because loop counter being too small
  qede: fix NULL pointer deref in __qede_remove()
  net: fix data-race in neigh_event_send()
  vsock/virtio: fix sock refcnt holding during the shutdown
  net: ethernet: octeon_mgmt: Account for second possible VLAN header
  mac80211: fix station inactive_time shortly after boot
  net/fq_impl: Switch to kvmalloc() for memory allocation
  mac80211: fix ieee80211_txq_setup_flows() failure path
  ipv4: Fix table id reference in fib_sync_down_addr
  ipv6: fixes rt6_probe() and fib6_nh->last_probe init
  net: hns: Fix the stray netpoll locks causing deadlock in NAPI path
  net: usb: qmi_wwan: add support for DW5821e with eSIM support
  CDC-NCM: handle incomplete transfer of MTU
  nfc: netlink: fix double device reference drop
  NFC: st21nfca: fix double free
  ...
parents 5cb8418c a2582cdc
......@@ -436,6 +436,10 @@ by the driver:
encryption.
* ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
but did not arrive in the expected order.
* ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
a TLS stream and arrived out-of-order, but skipped the HW offload routine
and went to the regular transmit flow as they were retransmissions of the
connection handshake.
* ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
a TLS stream dropped, because they arrived out of order and associated
record could not be found.
......
......@@ -3053,6 +3053,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
R: Andrii Nakryiko <andriin@fb.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
......
......@@ -1141,6 +1141,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto out_addrs;
}
/*
* If we have seen a tail call, we need a second pass.
* This is because bpf_jit_emit_common_epilogue() is called
* from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
*/
if (cgctx.seen & SEEN_TAILCALL) {
cgctx.idx = 0;
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
fp = org_fp;
goto out_addrs;
}
}
/*
* Pretend to build prologue, given the features we've seen. This will
* update ctgtx.idx as it pretends to output instructions, then we can
......
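The powerpc BPF JIT hunk above adds a second body pass whenever a tail call was seen, because the epilogue emitted for a tail call is sized from ctx->seen, which is only stable after the whole body has been scanned once. The following is a minimal, standalone userspace sketch of that two-pass idea (it is not the kernel JIT; the opcodes, feature bits and sizes are made up for illustration):

/*
 * Toy two-pass code generator: a feature that enlarges the epilogue may
 * only be discovered *after* a tail call has already been emitted, so a
 * second pass with the now-stable feature mask produces the correct size.
 */
#include <stdio.h>

#define SEEN_TAILCALL 0x1
#define SEEN_BIGFRAME 0x2	/* hypothetical feature enlarging the epilogue */

enum op { OP_ALU, OP_TAILCALL, OP_BIGFRAME, OP_EXIT };

struct ctx {
	unsigned int idx;	/* number of emitted (pretend) instructions */
	unsigned int seen;	/* feature bits discovered during emission  */
};

static void emit_epilogue(struct ctx *ctx)
{
	ctx->idx += (ctx->seen & SEEN_BIGFRAME) ? 4 : 2;
}

static void build_body(const enum op *prog, int len, struct ctx *ctx)
{
	for (int i = 0; i < len; i++) {
		switch (prog[i]) {
		case OP_TAILCALL:
			ctx->seen |= SEEN_TAILCALL;
			emit_epilogue(ctx);	/* sized from ctx->seen as known so far */
			break;
		case OP_BIGFRAME:
			ctx->seen |= SEEN_BIGFRAME;
			ctx->idx++;
			break;
		default:
			ctx->idx++;
			break;
		}
	}
}

int main(void)
{
	/* the epilogue-enlarging feature appears after the tail call */
	const enum op prog[] = { OP_ALU, OP_TAILCALL, OP_BIGFRAME, OP_EXIT };
	struct ctx ctx = { 0, 0 };

	build_body(prog, 4, &ctx);
	printf("pass 1: %u insns (epilogue sized with incomplete seen)\n", ctx.idx);

	if (ctx.seen & SEEN_TAILCALL) {		/* same condition as the fix above */
		ctx.idx = 0;
		build_body(prog, 4, &ctx);
		printf("pass 2: %u insns (epilogue sized with stable seen)\n", ctx.idx);
	}
	return 0;
}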
......@@ -2083,8 +2083,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !rcu_dereference(bond->curr_active_slave);
bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
slave->link_new_state = slave->link;
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
link_state = bond_check_dev_link(bond, slave->dev, 0);
......@@ -2118,7 +2117,7 @@ static int bond_miimon_inspect(struct bonding *bond)
}
if (slave->delay <= 0) {
slave->new_link = BOND_LINK_DOWN;
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
continue;
}
......@@ -2155,7 +2154,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = 0;
if (slave->delay <= 0) {
slave->new_link = BOND_LINK_UP;
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
ignore_updelay = false;
continue;
......@@ -2193,7 +2192,7 @@ static void bond_miimon_commit(struct bonding *bond)
struct slave *slave, *primary;
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
/* For 802.3ad mode, check current slave speed and
* duplex again in case its port was disabled after
......@@ -2265,8 +2264,8 @@ static void bond_miimon_commit(struct bonding *bond)
default:
slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
slave->new_link);
slave->new_link = BOND_LINK_NOCHANGE;
slave->link_new_state);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
continue;
}
......@@ -2674,13 +2673,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
slave->new_link = BOND_LINK_NOCHANGE;
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->new_link = BOND_LINK_UP;
bond_propose_link_state(slave, BOND_LINK_UP);
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
......@@ -2705,7 +2704,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->new_link = BOND_LINK_DOWN;
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
......@@ -2736,8 +2735,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
goto re_arm;
bond_for_each_slave(bond, slave, iter) {
if (slave->new_link != BOND_LINK_NOCHANGE)
slave->link = slave->new_link;
if (slave->link_new_state != BOND_LINK_NOCHANGE)
slave->link = slave->link_new_state;
}
if (slave_state_changed) {
......@@ -2760,9 +2759,9 @@ re_arm:
}
/* Called to inspect slaves for active-backup mode ARP monitor link state
* changes. Sets new_link in slaves to specify what action should take
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
* changes. Sets proposed link state in slaves to specify what action
* should take place for the slave. Returns 0 if no changes are found, >0
* if changes to link states must be committed.
*
* Called with rcu_read_lock held.
*/
......@@ -2774,12 +2773,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
int commit = 0;
bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
last_rx = slave_last_rx(bond, slave);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_rx, 1)) {
slave->new_link = BOND_LINK_UP;
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
}
continue;
......@@ -2807,7 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
!bond_time_in_interval(bond, last_rx, 3)) {
slave->new_link = BOND_LINK_DOWN;
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
......@@ -2820,7 +2819,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (bond_is_active_slave(slave) &&
(!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, last_rx, 2))) {
slave->new_link = BOND_LINK_DOWN;
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
}
......@@ -2840,7 +2839,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
continue;
......@@ -2890,8 +2889,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
default:
slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
slave->new_link);
slave_err(bond->dev, slave->dev,
"impossible: link_new_state %d on slave\n",
slave->link_new_state);
continue;
}
......
......@@ -52,6 +52,7 @@
#define CONTROL_EX_PDR BIT(8)
/* control register */
#define CONTROL_SWR BIT(15)
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
#define CONTROL_DISABLE_AR BIT(5)
......@@ -97,6 +98,9 @@
#define BTR_TSEG2_SHIFT 12
#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
/* interrupt register */
#define INT_STS_PENDING 0x8000
/* brp extension register */
#define BRP_EXT_BRPE_MASK 0x0f
#define BRP_EXT_BRPE_SHIFT 0
......@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
IF_MCONT_RCV_EOB);
}
static int c_can_software_reset(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int retry = 0;
if (priv->type != BOSCH_D_CAN)
return 0;
priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
msleep(20);
if (retry++ > 100) {
netdev_err(dev, "CCTRL: software reset failed\n");
return -EIO;
}
}
return 0;
}
/*
* Configure C_CAN chip:
* - enable/disable auto-retransmission
......@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int err;
err = c_can_software_reset(dev);
if (err)
return err;
/* enable automatic retransmission */
priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
......@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
struct can_berr_counter bec;
switch (error_type) {
case C_CAN_NO_ERROR:
priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
......@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
ERR_CNT_RP_SHIFT;
switch (error_type) {
case C_CAN_NO_ERROR:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
......@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
u16 curr, last = priv->last_status;
int work_done = 0;
priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
/* Ack status on C_CAN. D_CAN is self clearing */
if (priv->type != BOSCH_D_CAN)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* Only read the status register if a status interrupt was pending */
if (atomic_xchg(&priv->sie_pending, 0)) {
priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
/* Ack status on C_CAN. D_CAN is self clearing */
if (priv->type != BOSCH_D_CAN)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
} else {
/* no change detected ... */
curr = last;
}
/* handle state changes */
if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
......@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
/* handle bus recovery events */
if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
netdev_dbg(dev, "left bus off state\n");
priv->can.state = CAN_STATE_ERROR_ACTIVE;
work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
}
if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
netdev_dbg(dev, "left error passive state\n");
priv->can.state = CAN_STATE_ERROR_ACTIVE;
work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
}
if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
netdev_dbg(dev, "left error warning state\n");
work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
}
/* handle lec errors on the bus */
......@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
int reg_int;
if (!priv->read_reg(priv, C_CAN_INT_REG))
reg_int = priv->read_reg(priv, C_CAN_INT_REG);
if (!reg_int)
return IRQ_NONE;
/* save for later use */
if (reg_int & INT_STS_PENDING)
atomic_set(&priv->sie_pending, 1);
/* disable all interrupts and schedule the NAPI */
c_can_irq_control(priv, false);
napi_schedule(&priv->napi);
......
......@@ -198,6 +198,7 @@ struct c_can_priv {
struct net_device *dev;
struct device *device;
atomic_t tx_active;
atomic_t sie_pending;
unsigned long tx_dir;
int last_status;
u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
......
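The c_can hunks above split the work between the hard interrupt handler and the NAPI poll routine: the ISR only latches "a status interrupt is pending" into the new atomic sie_pending flag, and the poll routine consumes that flag with an atomic exchange before it reads (and thereby acks) the status register. Below is a minimal, standalone userspace sketch of that latch-and-consume pattern (C11 atomics, not the driver itself; the register value and function names are placeholders):

/*
 * Latch in "interrupt" context, consume exactly once in "poll" context.
 * atomic_exchange() returns the previous value and clears the flag, so a
 * pending status is never handled twice and never lost between contexts.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sie_pending;		/* mirrors priv->sie_pending */
static int last_status;

/* pretend hardware status register; reading it has side effects on C_CAN */
static int read_status_register(void)
{
	return 0x40;	/* e.g. EWARN set */
}

static void isr(bool status_interrupt)
{
	if (status_interrupt)
		atomic_store(&sie_pending, true);	/* latch only, no register read */
	/* ...disable interrupts and schedule the poll routine... */
}

static void poll(void)
{
	int curr;

	if (atomic_exchange(&sie_pending, false)) {
		/* a status interrupt really was pending: read and ack once */
		last_status = curr = read_status_register();
		printf("status changed: 0x%02x\n", curr);
	} else {
		/* no status interrupt since the last poll: reuse cached value */
		curr = last_status;
		printf("no change, keep 0x%02x\n", curr);
	}
}

int main(void)
{
	isr(true);	/* status interrupt fires */
	poll();		/* consumes the latch, reads the register once */
	poll();		/* nothing pending, does not touch the register */
	return 0;
}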
......@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
return;
ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
of_node_put(dn);
if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
}
......
......@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
struct can_frame *cf;
bool rx_errors = false, tx_errors = false;
u32 timestamp;
int err;
timestamp = priv->read(&regs->timer) << 16;
......@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
......@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
int flt;
struct can_berr_counter bec;
u32 timestamp;
int err;
timestamp = priv->read(&regs->timer) << 16;
......@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
......@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
priv->write(reg_mecr, &regs->mecr);
reg_mecr |= FLEXCAN_MECR_ECCDIS;
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
priv->write(reg_mecr, &regs->mecr);
......
......@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
return cb_b->timestamp - cb_a->timestamp;
}
static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
/**
* can_rx_offload_offload_one() - Read one CAN frame from HW
* @offload: pointer to rx_offload context
* @n: number of mailbox to read
*
* The task of this function is to read a CAN frame from mailbox @n
* from the device and return the mailbox's content as a struct
* sk_buff.
*
* If the struct can_rx_offload::skb_queue exceeds the maximal queue
* length (struct can_rx_offload::skb_queue_len_max) or no skb can be
* allocated, the mailbox contents is discarded by reading it into an
* overflow buffer. This way the mailbox is marked as free by the
* driver.
*
* Return: A pointer to skb containing the CAN frame on success.
*
* NULL if the mailbox @n is empty.
*
* ERR_PTR() in case of an error
*/
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
struct sk_buff *skb = NULL;
struct sk_buff *skb = NULL, *skb_error = NULL;
struct can_rx_offload_cb *cb;
struct can_frame *cf;
int ret;
/* If queue is full or skb not available, read to discard mailbox */
if (likely(skb_queue_len(&offload->skb_queue) <=
offload->skb_queue_len_max))
if (likely(skb_queue_len(&offload->skb_queue) <
offload->skb_queue_len_max)) {
skb = alloc_can_skb(offload->dev, &cf);
if (unlikely(!skb))
skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
} else {
skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
}
if (!skb) {
/* If queue is full or skb not available, drop by reading into
* overflow buffer.
*/
if (unlikely(skb_error)) {
struct can_frame cf_overflow;
u32 timestamp;
ret = offload->mailbox_read(offload, &cf_overflow,
&timestamp, n);
if (ret)
offload->dev->stats.rx_dropped++;
return NULL;
/* Mailbox was empty. */
if (unlikely(!ret))
return NULL;
/* Mailbox has been read and we're dropping it or
* there was a problem reading the mailbox.
*
* Increment error counters in any case.
*/
offload->dev->stats.rx_dropped++;
offload->dev->stats.rx_fifo_errors++;
/* There was a problem reading the mailbox, propagate
* error value.
*/
if (unlikely(ret < 0))
return ERR_PTR(ret);
return skb_error;
}
cb = can_rx_offload_get_cb(skb);
ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
if (!ret) {
/* Mailbox was empty. */
if (unlikely(!ret)) {
kfree_skb(skb);
return NULL;
}
/* There was a problem reading the mailbox, propagate error value. */
if (unlikely(ret < 0)) {
kfree_skb(skb);
offload->dev->stats.rx_dropped++;
offload->dev->stats.rx_fifo_errors++;
return ERR_PTR(ret);
}
/* Mailbox was read. */
return skb;
}
......@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
continue;
skb = can_rx_offload_offload_one(offload, i);
if (!skb)
break;
if (IS_ERR_OR_NULL(skb))
continue;
__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
}
......@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
struct sk_buff *skb;
int received = 0;
while ((skb = can_rx_offload_offload_one(offload, 0))) {
while (1) {
skb = can_rx_offload_offload_one(offload, 0);
if (IS_ERR(skb))
continue;
if (!skb)
break;
skb_queue_tail(&offload->skb_queue, skb);
received++;
}
......@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
unsigned long flags;
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max)
return -ENOMEM;
offload->skb_queue_len_max) {
kfree_skb(skb);
return -ENOBUFS;
}
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
......@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb)
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max)
return -ENOMEM;
offload->skb_queue_len_max) {
kfree_skb(skb);
return -ENOBUFS;
}
skb_queue_tail(&offload->skb_queue, skb);
can_rx_offload_schedule(offload);
......
......@@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
if (priv->after_suspend) {
mcp251x_hw_reset(spi);
mcp251x_setup(net, spi);
priv->force_quit = 0;
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
......@@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
priv->force_quit = 0;
}
if (priv->restart_tx) {
......
......@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
*/
#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
/* TI HECC module registers */
#define HECC_CANME 0x0 /* Mailbox enable */
......@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANTA 0x10 /* Transmission acknowledge */
#define HECC_CANAA 0x14 /* Abort acknowledge */
#define HECC_CANRMP 0x18 /* Receive message pending */
#define HECC_CANRML 0x1C /* Remote message lost */
#define HECC_CANRML 0x1C /* Receive message lost */
#define HECC_CANRFP 0x20 /* Remote frame pending */
#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
#define HECC_CANMC 0x28 /* Master control */
......@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
HECC_CANES_CRCE | HECC_CANES_SE |\
HECC_CANES_ACKE)
#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
HECC_CANES_EP | HECC_CANES_EW)
#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
......@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
}
/* Prevent message over-write & Enable interrupts */
hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
/* Enable tx interrupts */
hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
/* Prevent message over-write to create a rx fifo, but not for
* the lowest priority mailbox, since that allows detecting
* overflows instead of the hardware silently dropping the
* messages.
*/
mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
hecc_write(priv, HECC_CANOPC, mbx_mask);
/* Enable interrupts */
if (priv->use_hecc1int) {
hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
......@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
/* Disable the CPK; stop sending, erroring and acking */
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
/* Disable interrupts and disable mailboxes */
hecc_write(priv, HECC_CANGIM, 0);
hecc_write(priv, HECC_CANMIM, 0);
......@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
hecc_set_bit(priv