Commit 7f68d430 authored by Shannon Nelson's avatar Shannon Nelson Committed by Jeff Kirsher

ixgbevf: enable VF IPsec offload operations

Add the IPsec initialization into the driver startup and
add the Rx and Tx processing hooks.
Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 0062e7cc
...@@ -234,7 +234,7 @@ union ixgbe_adv_rx_desc { ...@@ -234,7 +234,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */ /* Context descriptors */
struct ixgbe_adv_tx_context_desc { struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens; __le32 vlan_macip_lens;
__le32 seqnum_seed; __le32 fceof_saidx;
__le32 type_tucmd_mlhl; __le32 type_tucmd_mlhl;
__le32 mss_l4len_idx; __le32 mss_l4len_idx;
}; };
......
...@@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { ...@@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
IXGBEVF_STAT("tx_ipsec", tx_ipsec),
IXGBEVF_STAT("rx_ipsec", rx_ipsec),
}; };
#define IXGBEVF_QUEUE_STATS_LEN ( \ #define IXGBEVF_QUEUE_STATS_LEN ( \
......
...@@ -459,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr); ...@@ -459,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
#ifdef CONFIG_XFRM_OFFLOAD
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd);
#else
static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { }
static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) { }
static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
#endif /* CONFIG_XFRM_OFFLOAD */
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
......
...@@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] = ...@@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] =
#define DRV_VERSION "4.1.0-k" #define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION; const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] = static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation."; "Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = { static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info, [board_82599_vf] = &ixgbevf_82599_vf_info,
...@@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer; struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc; union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = tx_ring->count / 2; unsigned int budget = tx_ring->count / 2;
unsigned int i = tx_ring->next_to_clean; unsigned int i = tx_ring->next_to_clean;
...@@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
/* update the statistics for this packet */ /* update the statistics for this packet */
total_bytes += tx_buffer->bytecount; total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs; total_packets += tx_buffer->gso_segs;
if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
total_ipsec++;
/* free the skb */ /* free the skb */
if (ring_is_xdp(tx_ring)) if (ring_is_xdp(tx_ring))
...@@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
u64_stats_update_end(&tx_ring->syncp); u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets; q_vector->tx.total_packets += total_packets;
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
...@@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, ...@@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
} }
...@@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, ...@@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
context_desc->vlan_macip_lens = context_desc->vlan_macip_lens =
cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
context_desc->seqnum_seed = 0; context_desc->fceof_saidx = 0;
context_desc->type_tucmd_mlhl = context_desc->type_tucmd_mlhl =
cpu_to_le32(IXGBE_TXD_CMD_DEXT | cpu_to_le32(IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT); IXGBE_ADVTXD_DTYP_CTXT);
...@@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ...@@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_set_rx_mode(adapter->netdev); ixgbevf_set_rx_mode(adapter->netdev);
ixgbevf_restore_vlan(adapter); ixgbevf_restore_vlan(adapter);
ixgbevf_ipsec_restore(adapter);
ixgbevf_configure_tx(adapter); ixgbevf_configure_tx(adapter);
ixgbevf_configure_rx(adapter); ixgbevf_configure_rx(adapter);
...@@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) ...@@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
int api[] = { ixgbe_mbox_api_13, int api[] = { ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
ixgbe_mbox_api_12, ixgbe_mbox_api_12,
ixgbe_mbox_api_11, ixgbe_mbox_api_11,
ixgbe_mbox_api_10, ixgbe_mbox_api_10,
...@@ -2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) ...@@ -2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_11: case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12: case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
if (adapter->xdp_prog && if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss) hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1; rss = rss > 3 ? 2 : 1;
...@@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) ...@@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
} }
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd, u32 vlan_macip_lens, u32 fceof_saidx,
u32 mss_l4len_idx) u32 type_tucmd, u32 mss_l4len_idx)
{ {
struct ixgbe_adv_tx_context_desc *context_desc; struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
...@@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, ...@@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = 0; context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
} }
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first, struct ixgbevf_tx_buffer *first,
u8 *hdr_len) u8 *hdr_len,
struct ixgbevf_ipsec_tx_data *itd)
{ {
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb; struct sk_buff *skb = first->skb;
...@@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, ...@@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
unsigned char *hdr; unsigned char *hdr;
} l4; } l4;
u32 paylen, l4_offset; u32 paylen, l4_offset;
u32 fceof_saidx = 0;
int err; int err;
if (skb->ip_summed != CHECKSUM_PARTIAL) if (skb->ip_summed != CHECKSUM_PARTIAL)
...@@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, ...@@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (ip.v4->version == 4) { if (ip.v4->version == 4) {
unsigned char *csum_start = skb_checksum_start(skb); unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
int len = csum_start - trans_start;
/* IP header will have to cancel out any data that /* IP header will have to cancel out any data that
* is not a part of the outer IP header * is not a part of the outer IP header, so set to
* a reverse csum if needed, else init check to 0.
*/ */
ip.v4->check = csum_fold(csum_partial(trans_start, ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
csum_start - trans_start, csum_fold(csum_partial(trans_start,
0)); len, 0)) : 0;
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0; ip.v4->tot_len = 0;
...@@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, ...@@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
fceof_saidx |= itd->pfsa;
type_tucmd |= itd->flags | itd->trailer_len;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
type_tucmd, mss_l4len_idx); mss_l4len_idx);
return 1; return 1;
} }
...@@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) ...@@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
} }
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first) struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{ {
struct sk_buff *skb = first->skb; struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0; u32 vlan_macip_lens = 0;
u32 fceof_saidx = 0;
u32 type_tucmd = 0; u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) if (skb->ip_summed != CHECKSUM_PARTIAL)
...@@ -3862,7 +3880,11 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, ...@@ -3862,7 +3880,11 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); fceof_saidx |= itd->pfsa;
type_tucmd |= itd->flags | itd->trailer_len;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
fceof_saidx, type_tucmd, 0);
} }
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
...@@ -3896,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, ...@@ -3896,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
if (tx_flags & IXGBE_TX_FLAGS_IPV4) if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
/* use index 1 context for TSO/FSO/FCOE */ /* enable IPsec */
if (tx_flags & IXGBE_TX_FLAGS_TSO) if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
/* use index 1 context for TSO/FSO/FCOE/IPSEC */
if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it /* Check Context must be set if Tx switch is enabled, which it
...@@ -4079,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, ...@@ -4079,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
int tso; int tso;
u32 tx_flags = 0; u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb)); u16 count = TXD_USE_COUNT(skb_headlen(skb));
struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f; unsigned short f;
#endif #endif
...@@ -4123,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, ...@@ -4123,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags; first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb); first->protocol = vlan_get_protocol(skb);
tso = ixgbevf_tso(tx_ring, first, &hdr_len); #ifdef CONFIG_XFRM_OFFLOAD
if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
else if (!tso) else if (!tso)
ixgbevf_tx_csum(tx_ring, first); ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
ixgbevf_tx_map(tx_ring, first, hdr_len); ixgbevf_tx_map(tx_ring, first, hdr_len);
...@@ -4638,6 +4669,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4638,6 +4669,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_11: case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12: case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN); (ETH_HLEN + ETH_FCS_LEN);
break; break;
...@@ -4673,6 +4705,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -4673,6 +4705,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev); pci_set_drvdata(pdev, netdev);
netif_carrier_off(netdev); netif_carrier_off(netdev);
ixgbevf_init_ipsec_offload(adapter);
ixgbevf_init_last_counter_stats(adapter); ixgbevf_init_last_counter_stats(adapter);
...@@ -4739,6 +4772,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) ...@@ -4739,6 +4772,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED) if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev); unregister_netdev(netdev);
ixgbevf_stop_ipsec_offload(adapter);
ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter); ixgbevf_reset_interrupt_capability(adapter);
......
...@@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) ...@@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type. * is not supported for this device type.
*/ */
switch (hw->api_version) { switch (hw->api_version) {
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12: case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf) if (hw->mac.type < ixgbe_mac_X550_vf)
...@@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) ...@@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type. * or if the operation is not supported for this device type.
*/ */
switch (hw->api_version) { switch (hw->api_version) {
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12: case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf) if (hw->mac.type < ixgbe_mac_X550_vf)
...@@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) ...@@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* Fall through */ /* Fall through */
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
break; break;
default: default:
...@@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, ...@@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_11: case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12: case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13: case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
break; break;
default: default:
return 0; return 0;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment