Commit 07230906 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Handle stations tied to AP_VLANs properly during mac80211 hw
    reconfig. From Manikanta Pubbisetty.

 2) Fix jump stack depth validation in nf_tables, from Taehee Yoo.

 3) Fix quota handling in aRFS flow expiration of mlx5 driver, from Eran
    Ben Elisha.

 4) Exit path handling fix in powerpc64 BPF JIT, from Daniel Borkmann.

 5) Use ptr_ring_consume_bh() in page pool code, from Tariq Toukan.

 6) Fix cached netdev name leak in nf_tables, from Florian Westphal.

 7) Fix memory leaks on chain rename, also from Florian Westphal.

 8) Several fixes to DCTCP congestion control ACK handling, from Yuchung
    Cheng.

 9) Missing rcu_read_unlock() in CAIF protocol code, from Yue Haibing.

10) Fix link local address handling with VRF, from David Ahern.

11) Don't clobber 'err' on a successful call to __skb_linearize() in
    skb_segment(). From Eric Dumazet.

12) Fix vxlan fdb notification races, from Roopa Prabhu.

13) Hash UDP fragments consistently, from Paolo Abeni.

14) If TCP receives lots of out of order tiny packets, we do really
    silly stuff. Make the out-of-order queue handling more robust to this
    kind of behavior, from Eric Dumazet.

15) Don't leak netlink dump state in nf_tables, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  net: axienet: Fix double deregister of mdio
  qmi_wwan: fix interface number for DW5821e production firmware
  ip: in cmsg IP(V6)_ORIGDSTADDR call pskb_may_pull
  bnx2x: Fix invalid memory access in rss hash config path.
  net/mlx4_core: Save the qpn from the input modifier in RST2INIT wrapper
  r8169: restore previous behavior to accept BIOS WoL settings
  cfg80211: never ignore user regulatory hint
  sock: fix sg page frag coalescing in sk_alloc_sg
  netfilter: nf_tables: move dumper state allocation into ->start
  tcp: add tcp_ooo_try_coalesce() helper
  tcp: call tcp_drop() from tcp_data_queue_ofo()
  tcp: detect malicious patterns in tcp_collapse_ofo_queue()
  tcp: avoid collapses in tcp_prune_queue() if possible
  tcp: free batches of packets in tcp_prune_ofo_queue()
  ip: hash fragments consistently
  ipv6: use fib6_info_hold_safe() when necessary
  can: xilinx_can: fix power management handling
  can: xilinx_can: fix incorrect clear of non-processed interrupts
  can: xilinx_can: fix RX overflow interrupt not being enabled
  can: xilinx_can: keep only 1-2 frames in TX FIFO to fix TX accounting
  ...
parents f89ed2f8 03bc7cab
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
u32 tmp_idx;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-/* error if EA is not word-aligned */
-PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-PPC_LI(b2p[BPF_REG_0], 0);
-PPC_JMP(exit_addr);
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */
@@ -649,32 +646,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
/* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */
-PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-/* otherwise, let's try once more */
-PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-/* exit if the store was not successful */
-PPC_LI(b2p[BPF_REG_0], 0);
-PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-/* error if EA is not doubleword-aligned */
-PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-PPC_LI(b2p[BPF_REG_0], 0);
-PPC_JMP(exit_addr);
-PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
tmp_idx = ctx->idx * 4;
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-PPC_LI(b2p[BPF_REG_0], 0);
-PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/*
......
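The two hunks above drop the alignment-check traps and, on a failed store-conditional, branch back to the load-reserve at tmp_idx instead of loading 0 into r0 and taking the exit path. Semantically, BPF_XADD is an atomic in-memory add; a minimal userspace C analogue of the retry loop's effect (a sketch of the semantics, not the JIT's actual output):

#include <stdint.h>

/* Sketch: BPF_STX | BPF_XADD | BPF_W means *(u32 *)(dst + off) += src,
 * atomically. The fixed JIT loops back to the lwarx when stwcx. loses
 * the reservation instead of falling through toward the exit path.
 */
static inline void bpf_xadd_w(uint8_t *dst, int16_t off, uint32_t src)
{
	__atomic_fetch_add((uint32_t *)(dst + off), src, __ATOMIC_RELAXED);
}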
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
-if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
-newval->string);
-/* disable arp monitoring */
-bond->params.arp_interval = 0;
-/* set miimon to default value */
-bond->params.miimon = BOND_DEFAULT_MIIMON;
-netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
-bond->params.miimon);
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
}
if (!bond->params.miimon) {
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
bond->params.miimon);
}
}
if (newval->value == BOND_MODE_ALB)
......
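The rewrite above decouples the two side effects of switching to a mode that cannot use ARP monitoring: arp_interval is cleared only when it was set, and miimon is defaulted only when it is still 0, so a bond configured with neither monitor no longer ends up with both disabled. A small self-contained model of the new flow (userspace C, hypothetical names, illustration only):

#include <stdbool.h>
#include <stdio.h>

#define BOND_DEFAULT_MIIMON 100	/* default MII monitoring interval, ms */

struct params { int arp_interval; int miimon; };

/* mirrors the fixed bond_option_mode_set() flow */
static void mode_set(struct params *p, bool mode_uses_arp)
{
	if (!mode_uses_arp) {
		if (p->arp_interval)
			p->arp_interval = 0;	/* disable ARP monitoring */
		if (!p->miimon)
			p->miimon = BOND_DEFAULT_MIIMON;
	}
}

int main(void)
{
	/* previously broken case: neither monitor configured */
	struct params p = { .arp_interval = 0, .miimon = 0 };

	mode_set(&p, false);
	printf("miimon=%d\n", p.miimon);	/* 100 now, 0 before the fix */
	return 0;
}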
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
int err;
err = pm_runtime_get_sync(priv->device);
-if (err)
if (err < 0) {
pm_runtime_put_noidle(priv->device);
return err;
}
-return err;
return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
-cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(cclk);
priv->mram_base = mram_addr;
-m_can_of_parse_mram(priv, mram_config_vals);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto clk_disable;
}
m_can_of_parse_mram(priv, mram_config_vals);
devm_can_led_init(dev);
of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
return ret;
}
-/* TODO: runtime PM with power down or sleep mode */
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
-m_can_init_ram(priv);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
if (ret)
return ret;
m_can_init_ram(priv);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
......
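The m_can_clk_start() change follows the usual pm_runtime idiom: pm_runtime_get_sync() increments the device usage count even when resume fails (and can return a positive value on success, hence the err < 0 test), so the failure path must rebalance the count with pm_runtime_put_noidle(). A toy userspace model of why the put is needed (illustration only, not kernel code):

#include <stdio.h>

/* Stand-ins for pm_runtime_get_sync()/pm_runtime_put_noidle(): the
 * "get" bumps the usage count unconditionally and may still report a
 * resume failure, which the caller must undo.
 */
struct dev_model { int usage; int resume_err; };

static int get_sync(struct dev_model *d)
{
	d->usage++;
	return d->resume_err;
}

static void put_noidle(struct dev_model *d)
{
	d->usage--;
}

static int clk_start(struct dev_model *d)
{
	int err = get_sync(d);

	if (err < 0) {
		put_noidle(d);	/* rebalance the usage count on failure */
		return err;
	}
	return 0;
}

int main(void)
{
	struct dev_model d = { .usage = 0, .resume_err = -13 /* -EACCES */ };

	clk_start(&d);
	printf("usage after failed start: %d (balanced)\n", d.usage);
	return 0;
}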
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
if (!cdm) {
of_node_put(np_cdm);
dev_err(&ofdev->dev, "can't map clock node!\n");
return 0;
}
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
......
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
((u32)(y) << 16) | \
((u32)(z) << 8))
/* System Control Registers Bits */
#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
"%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
hw_ver_major, hw_ver_minor, hw_ver_sub);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
* 64-bit logical addresses: this workaround forces usage of 32-bit
* DMA addresses only when such a fw is detected.
*/
if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
PCIEFD_FW_VERSION(3, 3, 0)) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
dev_warn(&pdev->dev,
"warning: can't set DMA mask %llxh (err %d)\n",
DMA_BIT_MASK(32), err);
}
#endif
/* stop system clock */
pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
PCIEFD_REG_SYS_CTL_CLR);
......
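PCIEFD_FW_VERSION() packs major, minor and sub into bits 31-24, 23-16 and 15-8 of a u32, so firmware versions compare as plain integers: v3.2.9 packs to 0x03020900, below v3.3.0's 0x03030000, and such firmware takes the 32-bit DMA mask workaround above. A standalone sanity check (illustrative):

#include <assert.h>
#include <stdint.h>

#define PCIEFD_FW_VERSION(x, y, z) (((uint32_t)(x) << 24) | \
				    ((uint32_t)(y) << 16) | \
				    ((uint32_t)(z) << 8))

int main(void)
{
	assert(PCIEFD_FW_VERSION(3, 3, 0) == 0x03030000u);
	/* any pre-3.3.0 firmware sorts below and takes the workaround */
	assert(PCIEFD_FW_VERSION(3, 2, 9) < PCIEFD_FW_VERSION(3, 3, 0));
	return 0;
}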
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
/**
* struct xcan_priv - This definition defines CAN driver instance
* @can: CAN private data structure.
* @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sent on the queue
* @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
*/
struct xcan_priv {
struct can_priv can;
spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
#define XCAN_CAP_WATERMARK 0x0001
struct xcan_devtype_data {
unsigned int caps;
};
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
/* reset clears FIFOs */
priv->tx_head = 0;
priv->tx_tail = 0;
return 0;
}
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
unsigned long flags;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stats->tx_bytes += cf->can_dlc;
}
/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
if (priv->tx_max > 1)
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
return 1;
}
/**
* xcan_current_error_state - Get current error state from HW
* @ndev: Pointer to net_device structure
*
* Checks the current CAN error state from the HW. Note that this
* only checks for ERROR_PASSIVE and ERROR_WARNING.
*
* Return:
* ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
* otherwise.
*/
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
return CAN_STATE_ERROR_PASSIVE;
else if (status & XCAN_SR_ERRWRN_MASK)
return CAN_STATE_ERROR_WARNING;
else
return CAN_STATE_ERROR_ACTIVE;
}
/**
* xcan_set_error_state - Set new CAN error state
* @ndev: Pointer to net_device structure
* @new_state: The new CAN state to be set
* @cf: Error frame to be populated or NULL
*
* Set new CAN error state for the device, updating statistics and
* populating the error frame if given.
*/
static void xcan_set_error_state(struct net_device *ndev,
enum can_state new_state,
struct can_frame *cf)
{
struct xcan_priv *priv = netdev_priv(ndev);
u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
u32 txerr = ecr & XCAN_ECR_TEC_MASK;
u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
priv->can.state = new_state;
if (cf) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
switch (new_state) {
case CAN_STATE_ERROR_PASSIVE:
priv->can.can_stats.error_passive++;
if (cf)
cf->data[1] = (rxerr > 127) ?
CAN_ERR_CRTL_RX_PASSIVE :
CAN_ERR_CRTL_TX_PASSIVE;
break;
case CAN_STATE_ERROR_WARNING:
priv->can.can_stats.error_warning++;
if (cf)
cf->data[1] |= (txerr > rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
break;
case CAN_STATE_ERROR_ACTIVE:
if (cf)
cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
break;
default:
/* non-ERROR states are handled elsewhere */
WARN_ON(1);
break;
}
}
/**
* xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
* @ndev: Pointer to net_device structure
*
* If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
* the performed RX/TX has caused it to drop to a lesser state and set
* the interface state accordingly.
*/
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
enum can_state old_state = priv->can.state;
enum can_state new_state;
/* changing error state due to successful frame RX/TX can only
* occur from these states
*/
if (old_state != CAN_STATE_ERROR_WARNING &&
old_state != CAN_STATE_ERROR_PASSIVE)
return;
new_state = xcan_current_error_state(ndev);
if (new_state != old_state) {
struct sk_buff *skb;
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
if (skb) {
struct net_device_stats *stats = &ndev->stats;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
}
}
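The helpers above exist because the error counters also count down on successful traffic, so the controller can fall back from error-passive toward error-active without raising an interrupt; the driver therefore re-derives the state from the status register after RX/TX. A userspace model of the standard CAN fault-confinement thresholds involved (warning at a counter of 96 or more, passive above 127), as an illustrative sketch rather than driver code:

#include <stdio.h>

enum state { ACTIVE, WARNING, PASSIVE };

/* classic CAN fault confinement: state follows the worse counter */
static enum state state_of(int txerr, int rxerr)
{
	int worst = txerr > rxerr ? txerr : rxerr;

	if (worst > 127)
		return PASSIVE;
	if (worst >= 96)
		return WARNING;
	return ACTIVE;
}

int main(void)
{
	/* successful transmissions decrement the TX error counter */
	for (int txerr = 128; txerr >= 88; txerr -= 10)
		printf("txerr=%3d -> state=%d\n", txerr, state_of(txerr, 0));
	return 0;
}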
/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
-u32 err_status, status, txerr = 0, rxerr = 0;
u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
-} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-priv->can.state = CAN_STATE_ERROR_PASSIVE;
-priv->can.can_stats.error_passive++;
-if (skb) {
-cf->can_id |= CAN_ERR_CRTL;
-cf->data[1] = (rxerr > 127) ?
-CAN_ERR_CRTL_RX_PASSIVE :
-CAN_ERR_CRTL_TX_PASSIVE;
-cf->data[6] = txerr;
-cf->data[7] = rxerr;
-}
-} else if (status & XCAN_SR_ERRWRN_MASK) {
-priv->can.state = CAN_STATE_ERROR_WARNING;
-priv->can.can_stats.error_warning++;
-if (skb) {
-cf->can_id |= CAN_ERR_CRTL;
-cf->data[1] |= (txerr > rxerr) ?
-CAN_ERR_CRTL_TX_WARNING :
-CAN_ERR_CRTL_RX_WARNING;
-cf->data[6] = txerr;
-cf->data[7] = rxerr;
-}
} else {
enum can_state new_state = xcan_current_error_state(ndev);
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
-priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-if (isr & XCAN_IXR_RXOK_MASK) {
-priv->write_reg(priv, XCAN_ICR_OFFSET,
-XCAN_IXR_RXOK_MASK);
-work_done += xcan_rx(ndev);
-} else {
-priv->write_reg(priv, XCAN_ICR_OFFSET,
-XCAN_IXR_RXNEMP_MASK);
-break;
-}
work_done += xcan_rx(ndev);
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
-if (work_done)
if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
xcan_update_error_state_after_rxtx(ndev);
}
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
ier |= XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned int frames_in_fifo;
int frames_sent = 1; /* TXOK => at least 1 frame was sent */
unsigned long flags;
int retries = 0;
/* Synchronize with xmit as we need to know the exact number
* of frames in the FIFO to stay in sync due to the TXFEMP
* handling.
* This also prevents a race between netif_wake_queue() and
* netif_stop_queue().
*/
spin_lock_irqsave(&priv->tx_lock, flags);
frames_in_fifo = priv->tx_head - priv->tx_tail;
if (WARN_ON_ONCE(frames_in_fifo == 0)) {
/* clear TXOK anyway to avoid getting back here */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return;
}
/* Check if 2 frames were sent (TXOK only means that at least 1
* frame was sent).
*/
if (frames_in_fifo > 1) {
WARN_ON(frames_in_fifo > priv->tx_max);
/* Synchronize TXOK and isr so that after the loop:
* (1) isr variable is up-to-date at least up to TXOK clear
* time. This avoids us clearing a TXOK of a second frame
* but not noticing that the FIFO is now empty and thus
* marking only a single frame as sent.
* (2) No TXOK is left. Having one could mean leaving a
* stray TXOK as we might process the associated frame
* via TXFEMP handling as we read TXFEMP *after* TXOK
* clear to satisfy (1).
*/
while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
-while ((priv->tx_head - priv->tx_tail > 0) &&
-(isr & XCAN_IXR_TXOK_MASK)) {
if (isr & XCAN_IXR_TXFEMP_MASK) {
/* nothing in FIFO anymore */
frames_sent = frames_in_fifo;
}
} else {
/* single frame in fifo, just clear TXOK */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
}
while (frames_sent--) {
can_get_echo_skb(ndev, priv->tx_tail %
priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
-isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
-can_led_event(ndev, CAN_LED_EVENT_TX);
netif_wake_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
/**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
u32 isr_errors;
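Taken together, the xmit and TX-interrupt changes make tx_head/tx_tail a pair of free-running counters protected by tx_lock: xmit increments tx_head and stops the queue once head - tail reaches tx_max, and the interrupt handler uses the TXFEMP (TX FIFO empty) bit to conclude that all outstanding frames completed rather than just one TXOK's worth. A compact userspace model of that accounting (names invented for illustration):

#include <assert.h>
#include <stdbool.h>

#define TX_MAX 2	/* frames kept in flight, as in the fix */

struct txq { unsigned int head, tail; bool stopped; };

/* xcan_start_xmit() analogue: queue a frame, stop queue when full */
static void xmit(struct txq *q)
{
	assert(q->head - q->tail < TX_MAX);
	q->head++;
	if (q->head - q->tail == TX_MAX)
		q->stopped = true;	/* netif_stop_queue() */
}

/* xcan_tx_interrupt() analogue: fifo_empty mirrors the TXFEMP bit */
static void tx_irq(struct txq *q, bool fifo_empty)
{
	unsigned int frames_sent = 1;	/* TXOK => at least one frame */

	assert(q->head != q->tail);
	if (q->head - q->tail > 1 && fifo_empty)
		frames_sent = q->head - q->tail;
	q->tail += frames_sent;
	q->stopped = false;		/* netif_wake_queue() */
}

int main(void)
{
	struct txq q = { 0, 0, false };

	xmit(&q);
	xmit(&q);		/* FIFO full: queue stops */
	assert(q.stopped);
	tx_irq(&q, true);	/* TXFEMP set: both frames done */
	assert(q.head == q.tail && !q.stopped);
	return 0;
}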