Commit 54704614 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Use an appropriate TSQ pacing shift in mac80211, from Toke
    Høiland-Jørgensen.

 2) Just like ipv4's ip_route_me_harder(), we have to use skb_to_full_sk
    in ip6_route_me_harder, from Eric Dumazet (see the sketch after this
    list).

 3) Fix several shutdown races and similar other problems in l2tp, from
    James Chapman.

 4) Handle missing XDP flush properly in tuntap, for real this time.
    From Jason Wang.

 5) Out-of-bounds access in powerpc ebpf tailcalls, from Daniel
    Borkmann.

 6) Fix phy_resume() locking, from Andrew Lunn.

 7) IFLA_MTU values are ignored on newlink for some tunnel types, fix
    from Xin Long.

 8) Revert the F-RTO middle-box workarounds; they only handle one
    dimension of the problem. From Yuchung Cheng.

 9) Fix socket refcounting in RDS, from Ka-Cheong Poon.

10) Don't allow ppp unit registration to an unregistered channel, from
    Guillaume Nault.

11) Various hv_netvsc fixes from Stephen Hemminger.
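
A minimal sketch of the helper fix 2 relies on (the wrapper function here
is hypothetical and shown for illustration only; the real change is inside
ip6_route_me_harder()):

    #include <net/inet_sock.h>	/* skb_to_full_sk() */

    /* On an early-demux path skb->sk can be a request socket, which lacks
     * the fields rerouting needs; skb_to_full_sk() hands back the
     * associated full (e.g. listener) socket instead, or NULL.
     */
    static struct sock *example_reroute_sk(const struct sk_buff *skb)
    {
    	return skb_to_full_sk(skb);
    }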

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (98 commits)
  hv_netvsc: propagate rx filters to VF
  hv_netvsc: filter multicast/broadcast
  hv_netvsc: defer queue selection to VF
  hv_netvsc: use napi_schedule_irqoff
  hv_netvsc: fix race in napi poll when rescheduling
  hv_netvsc: cancel subchannel setup before halting device
  hv_netvsc: fix error unwind handling if vmbus_open fails
  hv_netvsc: only wake transmit queue if link is up
  hv_netvsc: avoid retry on send during shutdown
  virtio-net: re enable XDP_REDIRECT for mergeable buffer
  ppp: prevent unregistered channels from connecting to PPP units
  tc-testing: skbmod: fix match value of ethertype
  mlxsw: spectrum_switchdev: Check success of FDB add operation
  net: make skb_gso_*_seglen functions private
  net: xfrm: use skb_gso_validate_network_len() to check gso sizes
  net: sched: tbf: handle GSO_BY_FRAGS case in enqueue
  net: rename skb_gso_validate_mtu -> skb_gso_validate_network_len
  rds: Incorrect reference counting in TCP socket creation
  net: ethtool: don't ignore return from driver get_fecparam method
  vrf: check forwarding on the original netdevice when generating ICMP dest unreachable
  ...
parents 661e50bc a7f0fb1b
......@@ -18,6 +18,7 @@ Required properties:
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
- "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- "renesas,etheravb-r8a77980" for the R8A77980 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- "renesas,etheravb-rcar-gen3" as a fallback for the above
R-Car Gen3 devices.
......
......@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
config MACH_DNS323
bool "D-Link DNS-323"
- select GENERIC_NET_UTILS
select I2C_BOARDINFO if I2C
help
Say 'Y' here if you want your kernel to support the
......@@ -66,7 +65,6 @@ config MACH_DNS323
config MACH_TS209
bool "QNAP TS-109/TS-209"
- select GENERIC_NET_UTILS
help
Say 'Y' here if you want your kernel to support the
QNAP TS-109/TS-209 platform.
......@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
config MACH_TS409
bool "QNAP TS-409"
- select GENERIC_NET_UTILS
help
Say 'Y' here if you want your kernel to support the
QNAP TS-409 platform.
......
......@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
+ /* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+  * functions be kept somewhere?
+  */
+ static int __init dns323_parse_hex_nibble(char n)
+ {
+ if (n >= '0' && n <= '9')
+ return n - '0';
+ if (n >= 'A' && n <= 'F')
+ return n - 'A' + 10;
+ if (n >= 'a' && n <= 'f')
+ return n - 'a' + 10;
+ return -1;
+ }
+ static int __init dns323_parse_hex_byte(const char *b)
+ {
+ int hi;
+ int lo;
+ hi = dns323_parse_hex_nibble(b[0]);
+ lo = dns323_parse_hex_nibble(b[1]);
+ if (hi < 0 || lo < 0)
+ return -1;
+ return (hi << 4) | lo;
+ }
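
A worked example of the two helpers just added (ASCII input assumed):

    /* dns323_parse_hex_byte("2a"):
     *   hi = dns323_parse_hex_nibble('2') = 2
     *   lo = dns323_parse_hex_nibble('a') = 10
     *   result = (2 << 4) | 10 = 0x2a
     * Any non-hex character makes a nibble call return -1, which
     * dns323_parse_hex_byte propagates as -1.
     */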
static int __init dns323_read_mac_addr(void)
{
u_int8_t addr[6];
- void __iomem *mac_page;
+ int i;
+ char *mac_page;
/* MAC address is stored as a regular ol' string in /dev/mtdblock4
* (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
......@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
if (!mac_page)
return -ENOMEM;
- if (!mac_pton((__force const char *) mac_page, addr))
- goto error_fail;
+ /* Sanity check the string we're looking at */
+ for (i = 0; i < 5; i++) {
+ if (*(mac_page + (i * 3) + 2) != ':') {
+ goto error_fail;
+ }
+ }
+ for (i = 0; i < 6; i++) {
+ int byte;
+ byte = dns323_parse_hex_byte(mac_page + (i * 3));
+ if (byte < 0) {
+ goto error_fail;
+ }
+ addr[i] = byte;
+ }
iounmap(mac_page);
printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
......
......@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
+ static int __init qnap_tsx09_parse_hex_nibble(char n)
+ {
+ if (n >= '0' && n <= '9')
+ return n - '0';
+ if (n >= 'A' && n <= 'F')
+ return n - 'A' + 10;
+ if (n >= 'a' && n <= 'f')
+ return n - 'a' + 10;
+ return -1;
+ }
+ static int __init qnap_tsx09_parse_hex_byte(const char *b)
+ {
+ int hi;
+ int lo;
+ hi = qnap_tsx09_parse_hex_nibble(b[0]);
+ lo = qnap_tsx09_parse_hex_nibble(b[1]);
+ if (hi < 0 || lo < 0)
+ return -1;
+ return (hi << 4) | lo;
+ }
static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
{
u_int8_t addr[6];
+ int i;
- if (!mac_pton(addr_str, addr))
- return -1;
+ for (i = 0; i < 6; i++) {
+ int byte;
+ /*
+  * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+  */
+ if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+ return -1;
+ byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+ if (byte < 0)
+ return -1;
+ addr[i] = byte;
+ }
printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
......@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
unsigned long addr;
for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
- void __iomem *nor_page;
+ char *nor_page;
int ret = 0;
nor_page = ioremap(addr, 1024);
if (nor_page != NULL) {
- ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+ ret = qnap_tsx09_check_mac_addr(nor_page);
iounmap(nor_page);
}
......
......@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* goto out;
*/
PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+ PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
PPC_BCC(COND_GE, out);
......
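
In C terms, the added PPC_RLWINM does the following (an illustrative
sketch; regs_index and array are stand-in names, not the JIT's actual
registers):

    /* The BPF tail-call index is defined as u32, but on ppc64 it sits in
     * a 64-bit GPR that may carry stale upper bits. rlwinm rX,rX,0,0,31
     * keeps bits 0-31 of the word and clears the top half, so a value
     * such as 0x1_0000_0000 cannot slip past the bounds check.
     */
    u32 index = (u32)regs_index;          /* PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31) */
    if (index >= array->map.max_entries)  /* PPC_CMPLW + PPC_BCC(COND_GE, out) */
    	goto out;                         /* tail call rejected */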
......@@ -21,6 +21,7 @@
*
*/
+ #include <linux/dmi.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
......@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
{ } /* Terminating entry */
};
+ /* The Bluetooth USB module built into some devices needs to be reset on
+  * resume; this is a problem with the platform (likely shutting off all
+  * power), not with the module itself. So we use a DMI list to match known
+  * broken platforms.
+  */
+ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ {
+ /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+ },
+ },
+ {}
+ };
#define BTUSB_MAX_ISOC_FRAMES 10
#define BTUSB_INTR_RUNNING 0
......@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ if (dmi_check_system(btusb_needs_reset_resume_table))
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
if (err)
......@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- /* QCA Rome devices lose their updated firmware over suspend,
-  * but the USB hub doesn't notice any status change.
-  * Explicitly request a device reset on resume.
-  */
- interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
......
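
The DMI table above is the extensible part of this fix: a further platform
with the same lose-power-over-suspend behaviour needs only one more entry
rather than per-device quirk code. A hypothetical additional entry (vendor
and product strings invented purely for illustration):

    {
    	/* Hypothetical: another QCA Rome laptop with the same issue */
    	.matches = {
    		DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE-VENDOR"),
    		DMI_MATCH(DMI_PRODUCT_VERSION, "Example Book 13"),
    	},
    },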
......@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->clk = devm_clk_get(dev->dev, NULL);
- dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
- GPIOD_OUT_LOW);
+ dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
return PTR_ERR(dev->device_wakeup);
- dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+ dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+ GPIOD_OUT_LOW);
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
......
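
The behavioural difference the hunk above depends on, sketched (error
handling condensed; the dev_dbg() call is illustrative, not from the
patch):

    struct gpio_desc *d;

    d = devm_gpiod_get_optional(dev->dev, "device-wakeup", GPIOD_OUT_LOW);
    if (IS_ERR(d))
    	return PTR_ERR(d);	/* real lookup failures still abort probe */
    if (!d)			/* line absent: NULL instead of ERR_PTR(-ENOENT) */
    	dev_dbg(dev->dev, "no device-wakeup gpio, continuing without\n");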
......@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
- /* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, ndev);
/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
......@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
continue;
}
- gfar_process_frame(ndev, skb);
/* Increment the number of packets */
total_pkts++;
total_bytes += skb->len;
skb_record_rx_queue(skb, rx_queue->qindex);
+ gfar_process_frame(ndev, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
/* Send the packet up the stack */
napi_gro_receive(&rx_queue->grp->napi_rx, skb);
......
......@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ offset,
+ skb_headlen(skb),
+ DMA_FROM_DEVICE);
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
......
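
Why the new branch masks with ~PAGE_MASK: build_skb rings map a whole
page, so only the received headlen bytes at skb->data's offset inside that
page need syncing. A worked case assuming 4 KiB pages (addresses invented
for illustration):

    /* PAGE_MASK == ~0xfffUL on 4 KiB pages, so ~PAGE_MASK == 0xfff.
     *
     *   skb->data    = 0xffff880012345840
     *   offset       = 0xffff880012345840 & 0xfff = 0x840
     *   range synced = [dma + 0x840, dma + 0x840 + skb_headlen(skb))
     */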
......@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
};
- #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+ #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
struct mlxsw_afk_element_inst { /* element instance in actual block */
const struct mlxsw_afk_element_info *info;
......
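
A quick consistency check on the relocated layout: the last key field,
DST_IP6_LO, now starts at 0x38 and is 8 bytes long, so the scratchpad must
cover

    0x38 + 0x8 = 0x40 == MLXSW_AFK_ELEMENT_STORAGE_SIZE (new value)

which is exactly why the define grows in the same hunk.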
......@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->ref_count = 1;
mlxsw_sp_port_vlan->vid = vid;
list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
......@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (mlxsw_sp_port_vlan)
+ if (mlxsw_sp_port_vlan) {
+ mlxsw_sp_port_vlan->ref_count++;
return mlxsw_sp_port_vlan;
+ }
return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
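
Sketch of the get/put pairing these hunks establish (error handling
abbreviated; whether _get() returns an ERR_PTR on failure follows the
existing _create() path):

    struct mlxsw_sp_port_vlan *pv;

    pv = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);  /* ref_count++, or create at 1 */
    if (IS_ERR(pv))
    	return PTR_ERR(pv);
    /* ... use pv ... */
    mlxsw_sp_port_vlan_put(pv);  /* tears down only when ref_count reaches 0 */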
......@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ if (--mlxsw_sp_port_vlan->ref_count != 0)
+ return;
if (mlxsw_sp_port_vlan->bridge_port)
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
else if (fid)
......@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
};
- static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
- static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
static void
- mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+ struct devlink_resource_size_params *kvd_size_params,
+ struct devlink_resource_size_params *linear_size_params,
+ struct devlink_resource_size_params *hash_double_size_params,
+ struct devlink_resource_size_params *hash_single_size_params)
{
u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
KVD_SINGLE_MIN_SIZE);
......@@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
u32 linear_size_min = 0;
- /* KVD top resource */
- mlxsw_sp_kvd_size_params.size_min = kvd_size;
- mlxsw_sp_kvd_size_params.size_max = kvd_size;
- mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- /* Linear part init */
- mlxsw_sp_linear_size_params.size_min = linear_size_min;
- mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
- double_size_min;
- mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- /* Hash double part init */
- mlxsw_sp_hash_double_size_params.size_min = double_size_min;
- mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
- linear_size_min;
- mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- /* Hash single part init */
- mlxsw_sp_hash_single_size_params.size_min = single_size_min;
- mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
- linear_size_min;
- mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+ devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(linear_size_params, linear_size_min,
+ kvd_size - single_size_min -
+ double_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_double_size_params,
+ double_size_min,
+ kvd_size - single_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_single_size_params,
+ single_size_min,
+ kvd_size - double_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
}
static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params hash_single_size_params;
+ struct devlink_resource_size_params hash_double_size_params;
+ struct devlink_resource_size_params linear_size_params;
+ struct devlink_resource_size_params kvd_size_params;
u32 kvd_size, single_size, double_size, linear_size;
const struct mlxsw_config_profile *profile;
int err;
......@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
- mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+ &linear_size_params,
+ &hash_double_size_params,
+ &hash_single_size_params);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
true, kvd_size,
MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &mlxsw_sp_kvd_size_params,
+ &kvd_size_params,
&mlxsw_sp_resource_kvd_ops);
if (err)
return err;
......@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
false, linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_linear_size_params,
+ &linear_size_params,
&mlxsw_sp_resource_kvd_linear_ops);
if (err)
return err;
......@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
false, double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_hash_double_size_params,
+ &hash_double_size_params,
&mlxsw_sp_resource_kvd_hash_double_ops);
if (err)
return err;
......@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
false, single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_hash_single_size_params,
+ &hash_single_size_params,
&mlxsw_sp_resource_kvd_hash_single_ops);
if (err)
return err;
......
......@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *fid;
+ unsigned int ref_count;
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct list_head bridge_vlan_node;
......
......@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
};
static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
- [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
};
static const int *mlxsw_sp_packet_type_sfgc_types[] = {
......
......@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
......@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+ out:
+ kfree(sfd_pl);
return err;
}
......@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
......@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+ out:
+ kfree(sfd_pl);
return err;
}
......@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid_idx, bool adding)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
......@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+ out:
kfree(sfd_pl);
return err;
}
......
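
All three hunks above add the same guard; its shape in isolation (names
taken from the hunks, shown as a sketch rather than a drop-in):

    num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);   /* records we packed */
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
    if (err)
    	goto out;
    /* the response payload carries how many records the device consumed */
    if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
    	err = -EBUSY;   /* e.g. FDB full: the entry was not added */
    out:
    	kfree(sfd_pl);
    	return err;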
......@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
enum_index);
}
+ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+ int enum_index)
+ {
+ iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ }
+ static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+ {
+ return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ }
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
......
......@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
return mdp->tsu_addr + mdp->reg_offset[enum_index];
}
- static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
- int enum_index)
- {
- iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
- }
- static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
- {
- return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
- }
#endif /* #ifndef __SH_ETH_H__ */
......@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
if (unlikely(!net_device || net_device->destroy))
return -ENODEV;
- /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-  * here before the negotiation with the host is finished and
-  * send_section_map may not be allocated yet.
-  */
- if (unlikely(!net_device->send_section_map))
- return -EAGAIN;
nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
......@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
/* Send control message directly without accessing msd (Multi-Send
* Data) field which may be changed during data packet processing.
*/
- if (!skb) {
- cur_send = packet;
- goto send_now;
- }
+ if (!skb)
+ return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
msdp = &nvchan->msd;
......@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
}
}
- send_now:
if (cur_send)
ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
......@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
work_done < budget &&
napi_complete_done(napi, work_done) &&
- hv_end_read(&channel->inbound)) {
+ hv_end_read(&channel->inbound) &&
+ napi_schedule_prep(napi)) {
hv_begin_read(&channel->inbound);
- napi_reschedule(napi);
+ __napi_schedule(napi);
}
/* Driver may overshoot since multiple packets per descriptor */
......@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
/* disable interrupts from host */
hv_begin_read(rbi);
- __napi_schedule(&nvchan->napi);
+ __napi_schedule_irqoff(&nvchan->napi);
}
}
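
The race the poll hunk closes, in brief: once napi_complete_done() re-arms
the channel, the host can schedule this NAPI context concurrently, so
ownership must be re-taken atomically with napi_schedule_prep() before the
ring is re-read. Condensed shape of the safe pattern (same calls as the
hunk, repeated only to make the ordering explicit):

    if (work_done < budget &&
        napi_complete_done(napi, work_done) &&  /* re-arms the channel */
        hv_end_read(&channel->inbound) &&       /* more data slipped in */
        napi_schedule_prep(napi)) {             /* atomically reclaim NAPI */
    	hv_begin_read(&channel->inbound);
    	__napi_schedule(napi);                  /* ownership held: no race */
    }

The companion hunk switches netvsc_channel_cb() to
__napi_schedule_irqoff(), which is safe there because the channel callback
already runs with hardirqs disabled.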
......@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
- netif_napi_del(&net_device->chan_table[0].napi);
netdev_err(ndev, "unable to open channel: %d\n", ret);
goto cleanup;
}
......@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
napi_enable(&net_device->chan_table[0].napi);
- /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-  * populated.
-  */