Merge tag 'net-6.19-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from CAN and wireless.

  Pretty big, but it's hard to come up with any cohesive story that
  would explain it; this is a random collection of fixes. The two
  reverts of bad patches from this release feel like stuff that'd
  normally show up by rc5 or rc6. Perhaps an obvious thing to say,
  given the holiday timing.

  That said, no active investigations / regressions. Let's see what the
  next week brings.

  Current release - fix to a fix:

   - can: alloc_candev_mqs(): add missing default CAN capabilities

  Current release - regressions:

   - usbnet: fix crash due to missing BQL accounting after resume

   - Revert "net: wwan: mhi_wwan_mbim: Avoid -Wflex-array-member-not ...

  Previous releases - regressions:

   - Revert "nfc/nci: Add the inconsistency check between the input ...

  Previous releases - always broken:

   - a number of driver fixes for incorrect use of seqlocks on stats

   - rxrpc: fix recvmsg()'s unconditional requeue; don't corrupt the
     receive queue when MSG_PEEK is set

   - ipvlan: make the addrs_lock per-port to avoid races in the port
     hash table

   - sched: enforce that teql can only be used as root qdisc

   - virtio: coalesce only linear skb

   - wifi: ath12k: fix deadlock while flushing management frames

   - eth: igc: reduce TSN TX packet buffer from 7KB to 5KB per queue"

* tag 'net-6.19-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (96 commits)
  Octeontx2-af: Add proper checks for fwdata
  dpll: Prevent duplicate registrations
  net/sched: act_ife: avoid possible NULL deref
  hinic3: Fix netif_queue_set_napi queue_index input parameter error
  vsock/test: add stream TX credit bounds test
  vsock/virtio: cap TX credit to local buffer size
  vsock/test: fix seqpacket message bounds test
  vsock/virtio: fix potential underflow in virtio_transport_get_credit()
  net: fec: account for VLAN header in frame length calculations
  net: openvswitch: fix data race in ovs_vport_get_upcall_stats
  octeontx2-af: Fix error handling
  net: pcs: pcs-mtk-lynxi: report in-band capability for 2500Base-X
  rxrpc: Fix data-race warning and potential load/store tearing
  net: dsa: fix off-by-one in maximum bridge ID determination
  net: bcmasp: Fix network filter wake for asp-3.0
  bonding: provide a net pointer to __skb_flow_dissect()
  selftests: net: amt: wait longer for connection before sending packets
  be2net: Fix NULL pointer dereference in be_cmd_get_mac_from_list
  Revert "net: wwan: mhi_wwan_mbim: Avoid -Wflex-array-member-not-at-end warning"
  netrom: fix double-free in nr_route_frame()
  ...
Linus Torvalds
2026-01-22 09:32:11 -08:00
108 changed files with 1144 additions and 472 deletions

View File

@@ -2231,6 +2231,10 @@ S: Markham, Ontario
S: L3R 8B2
S: Canada
+N: Krzysztof Kozlowski
+E: krzk@kernel.org
+D: NFC network subsystem and drivers maintainer
N: Christian Krafft
D: PowerPC Cell support

View File

@@ -39,6 +39,8 @@ attribute-sets:
-
name: ipproto
type: u8
+checks:
+min: 1
-
name: type
type: u8

View File

@@ -363,6 +363,18 @@ just do it. As a result, a sequence of smaller series gets merged quicker and
with better review coverage. Re-posting large series also increases the mailing
list traffic.
+Limit patches outstanding on mailing list
+-----------------------------------------
+Avoid having more than 15 patches, across all series, outstanding for
+review on the mailing list for a single tree. In other words, a maximum of
+15 patches under review on net, and a maximum of 15 patches under review on
+net-next.
+This limit is intended to focus developer effort on testing patches before
+upstream review. Aiding the quality of upstream submissions, and easing the
+load on reviewers.
.. _rcs:
Local variable ordering ("reverse xmas tree", "RCS")

View File

@@ -18486,9 +18486,8 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
-M: Krzysztof Kozlowski <krzk@kernel.org>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/net/nfc/

View File

@@ -83,10 +83,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
if (ref->pin != pin)
continue;
reg = dpll_pin_registration_find(ref, ops, priv, cookie);
-if (reg) {
-refcount_inc(&ref->refcount);
-return 0;
-}
+if (reg)
+return -EEXIST;
ref_exists = true;
break;
}
@@ -164,10 +162,8 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
if (ref->dpll != dpll)
continue;
reg = dpll_pin_registration_find(ref, ops, priv, cookie);
-if (reg) {
-refcount_inc(&ref->refcount);
-return 0;
-}
+if (reg)
+return -EEXIST;
ref_exists = true;
break;
}

View File

@@ -109,14 +109,14 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
spin_unlock_irq(&dev->lock);
if (filep->f_flags & O_NONBLOCK)
return -EAGAIN;
-wait_event_interruptible(dev->wait, (dev->work ||
+wait_event_interruptible(dev->wait, (READ_ONCE(dev->work) ||
!list_empty(list)));
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&dev->lock);
}
if (dev->work)
-dev->work = 0;
+WRITE_ONCE(dev->work, 0);
if (!list_empty(list)) {
timer = list_first_entry(list, struct mISDNtimer, list);
list_del(&timer->list);
@@ -141,13 +141,16 @@ mISDN_poll(struct file *filep, poll_table *wait)
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
if (dev) {
+u32 work;
poll_wait(filep, &dev->wait, wait);
mask = 0;
-if (dev->work || !list_empty(&dev->expired))
+work = READ_ONCE(dev->work);
+if (work || !list_empty(&dev->expired))
mask |= (EPOLLIN | EPOLLRDNORM);
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
-dev->work, list_empty(&dev->expired));
+work, list_empty(&dev->expired));
}
return mask;
}
@@ -172,7 +175,7 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
struct mISDNtimer *timer;
if (!timeout) {
-dev->work = 1;
+WRITE_ONCE(dev->work, 1);
wake_up_interruptible(&dev->wait);
id = 0;
} else {
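
The mISDN change above is one instance of a pattern several fixes in
this pull rely on: a flag that is read without the protecting lock held
must go through the kernel's one-time-access helpers so the compiler
cannot tear, fuse, or re-read the access. A minimal sketch of the idea
(generic names, not the driver's code):

/* Sketch: a flag set from timer/IRQ context and polled lockless. */
static DECLARE_WAIT_QUEUE_HEAD(wq);
static int work_flag;

static void timer_fires(void)          /* writer side */
{
	WRITE_ONCE(work_flag, 1);      /* single, untorn store */
	wake_up_interruptible(&wq);
}

static void wait_for_work(void)        /* reader side */
{
	wait_event_interruptible(wq, READ_ONCE(work_flag));
	WRITE_ONCE(work_flag, 0);      /* consume the event */
}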

View File

@@ -1862,6 +1862,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
+if (slave_dev->type != ARPHRD_ETHER &&
+BOND_MODE(bond) == BOND_MODE_8023AD) {
+SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+"8023AD mode requires Ethernet devices");
+return -EINVAL;
+}
slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
bond_dev->type, slave_dev->type);
@@ -4090,8 +4096,9 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
case BOND_XMIT_POLICY_ENCAP23:
case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
-return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
-fk, data, l2_proto, nhoff, hlen, 0);
+return __skb_flow_dissect(dev_net(bond->dev), skb,
+&flow_keys_bonding, fk, data,
+l2_proto, nhoff, hlen, 0);
default:
break;
}

View File

@@ -332,6 +332,7 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
+can_set_cap(dev, CAN_CAP_CC);
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;

View File

@@ -486,11 +486,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
usb_anchor_urb(urb, &dev->rx_submitted);
retval = usb_submit_urb(urb, GFP_ATOMIC);
+if (!retval)
+return;
+usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
-else if (retval)
+else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}
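
Six CAN USB drivers in this pull (ems_usb, esd_usb, gs_usb, kvaser_usb,
mcba_usb and usb_8dev) receive the same fix, shown here for ems_usb: if
resubmitting the RX URB fails, the URB must be unanchored again or it
lingers on the anchor list; on success the callback returns with the
URB still anchored. The shared shape, roughly (details vary per
driver):

/* Sketch of the common resubmit path, not any one driver verbatim */
usb_anchor_urb(urb, &dev->rx_submitted);

retval = usb_submit_urb(urb, GFP_ATOMIC);
if (!retval)
	return;                 /* in flight; unanchored at completion */

usb_unanchor_urb(urb);          /* submission failed: drop the anchor */
if (retval == -ENODEV)
	netif_device_detach(netdev);
else
	netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
		   retval);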

View File

@@ -541,13 +541,20 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
usb_anchor_urb(urb, &dev->rx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
+if (!err)
+return;
+usb_unanchor_urb(urb);
if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
-} else if (err) {
+} else {
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}

View File

@@ -754,6 +754,10 @@ resubmit_urb:
usb_anchor_urb(urb, &parent->rx_submitted);
rc = usb_submit_urb(urb, GFP_ATOMIC);
+if (!rc)
+return;
+usb_unanchor_urb(urb);
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
@@ -762,6 +766,9 @@ device_detach:
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
+} else if (rc != -ESHUTDOWN && net_ratelimit()) {
+netdev_info(netdev, "failed to re-submit IN URB: %pe\n",
+ERR_PTR(urb->status));
}
}

View File

@@ -361,7 +361,14 @@ resubmit_urb:
urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
kvaser_usb_read_bulk_callback, dev);
usb_anchor_urb(urb, &dev->rx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
+if (!err)
+return;
+usb_unanchor_urb(urb);
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
struct kvaser_usb_net_priv *priv;
@@ -372,7 +379,7 @@ resubmit_urb:
netif_device_detach(priv->netdev);
}
-} else if (err) {
+} else {
dev_err(&dev->intf->dev,
"Failed resubmitting read bulk urb: %d\n", err);
}

View File

@@ -608,11 +608,17 @@ resubmit_urb:
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
usb_anchor_urb(urb, &priv->rx_submitted);
retval = usb_submit_urb(urb, GFP_ATOMIC);
+if (!retval)
+return;
+usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
-else if (retval)
+else
netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
retval);
}

View File

@@ -541,11 +541,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
usb_8dev_read_bulk_callback, priv);
usb_anchor_urb(urb, &priv->rx_submitted);
retval = usb_submit_urb(urb, GFP_ATOMIC);
+if (!retval)
+return;
+usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
-else if (retval)
+else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}

View File

@@ -1837,7 +1837,7 @@ static void xgbe_get_stats64(struct net_device *netdev,
s->multicast = pstats->rxmulticastframes_g;
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
-s->rx_fifo_errors = pstats->rxfifooverflow;
+s->rx_over_errors = pstats->rxfifooverflow;
s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
@@ -2292,9 +2292,6 @@ read_again:
goto read_again;
if (error || packet->errors) {
-if (packet->errors)
-netif_err(pdata, rx_err, netdev,
-"error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}

View File

@@ -156,7 +156,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_OFFSET_L4(32),
ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
-rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -166,7 +166,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
-rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -714,6 +714,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
nfilter = &priv->net_filters[open_index];
nfilter->claimed = true;
nfilter->port = intf->port;
+nfilter->ch = intf->channel + priv->tx_chan_offset;
nfilter->hw_index = open_index;
}

View File

@@ -348,6 +348,7 @@ struct bcmasp_net_filter {
bool wake_filter;
int port;
+int ch;
unsigned int hw_index;
};

View File

@@ -3801,6 +3801,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
int status;
bool pmac_valid = false;
+u32 pmac_id;
eth_zero_addr(mac);
@@ -3813,7 +3814,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
-NULL, adapter->if_handle, 0);
+&pmac_id, adapter->if_handle, 0);
}
return status;

View File

@@ -2141,7 +2141,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
struct be_aic_obj *aic;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
-u64 rx_pkts = 0, tx_pkts = 0;
+u64 rx_pkts = 0, tx_pkts = 0, pkts;
ulong now;
u32 pps, delta;
int i;
@@ -2157,15 +2157,17 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin(&rxo->stats.sync);
-rx_pkts += rxo->stats.rx_pkts;
+pkts = rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry(&rxo->stats.sync, start));
+rx_pkts += pkts;
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin(&txo->stats.sync);
-tx_pkts += txo->stats.tx_reqs;
+pkts = txo->stats.tx_reqs;
} while (u64_stats_fetch_retry(&txo->stats.sync, start));
+tx_pkts += pkts;
}
/* Skip, if wrapped around or first calculation */

View File

@@ -1150,7 +1150,7 @@ fec_restart(struct net_device *ndev)
u32 rcntl = FEC_RCR_MII;
if (OPT_ARCH_HAS_MAX_FL)
-rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
+rcntl |= (fep->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN) << 16;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1285,12 +1285,13 @@ fec_restart(struct net_device *ndev)
/* When Jumbo Frame is enabled, the FIFO may not be large enough
* to hold an entire frame. In such cases, if the MTU exceeds
-* (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
-* to operate in cut-through mode, triggered by the FIFO threshold.
+* (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN), configure
+* the interface to operate in cut-through mode, triggered by
+* the FIFO threshold.
* Otherwise, enable the ENET store-and-forward mode.
*/
if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
-(ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+(ndev->mtu > (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)))
writel(0xF, fep->hwp + FEC_X_WMRK);
else
writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
@@ -4037,7 +4038,7 @@ static int fec_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
-order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+order = get_order(new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN
+ FEC_DRV_RESERVE_SPACE);
fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
fep->pagepool_order = order;
@@ -4588,7 +4589,7 @@ fec_probe(struct platform_device *pdev)
else
fep->max_buf_size = PKT_MAXBUF_SIZE;
-ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
+ndev->max_mtu = fep->max_buf_size - VLAN_ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
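
The FEC fix swaps ETH_HLEN for VLAN_ETH_HLEN in every frame-length
calculation so a single 802.1Q tag no longer pushes a max-MTU frame
past the reserved buffer. With the standard header sizes from the
kernel's if_ether.h and if_vlan.h, the budget works out as below (a
sketch, not driver code):

#include <linux/if_ether.h>     /* ETH_HLEN = 14, ETH_FCS_LEN = 4 */
#include <linux/if_vlan.h>      /* VLAN_HLEN = 4, VLAN_ETH_HLEN = 18 */

/* Worst-case wire length for a given MTU:
 * MTU 1500 -> 1500 + 18 + 4 = 1522 bytes (1518 with plain ETH_HLEN).
 */
static unsigned int max_frame_len(unsigned int mtu)
{
	return mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
}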

View File

@@ -1602,8 +1602,10 @@ static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
tbiphy = of_phy_find_device(ug_info->tbi_node);
-if (!tbiphy)
+if (!tbiphy) {
pr_warn("Could not get TBI device\n");
+return;
+}
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */

View File

@@ -2529,44 +2529,47 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
struct hns3_enet_ring *ring, bool is_tx)
{
+struct ring_stats ring_stats;
unsigned int start;
do {
start = u64_stats_fetch_begin(&ring->syncp);
-if (is_tx) {
-stats->tx_bytes += ring->stats.tx_bytes;
-stats->tx_packets += ring->stats.tx_pkts;
-stats->tx_dropped += ring->stats.sw_err_cnt;
-stats->tx_dropped += ring->stats.tx_vlan_err;
-stats->tx_dropped += ring->stats.tx_l4_proto_err;
-stats->tx_dropped += ring->stats.tx_l2l3l4_err;
-stats->tx_dropped += ring->stats.tx_tso_err;
-stats->tx_dropped += ring->stats.over_max_recursion;
-stats->tx_dropped += ring->stats.hw_limitation;
-stats->tx_dropped += ring->stats.copy_bits_err;
-stats->tx_dropped += ring->stats.skb2sgl_err;
-stats->tx_dropped += ring->stats.map_sg_err;
-stats->tx_errors += ring->stats.sw_err_cnt;
-stats->tx_errors += ring->stats.tx_vlan_err;
-stats->tx_errors += ring->stats.tx_l4_proto_err;
-stats->tx_errors += ring->stats.tx_l2l3l4_err;
-stats->tx_errors += ring->stats.tx_tso_err;
-stats->tx_errors += ring->stats.over_max_recursion;
-stats->tx_errors += ring->stats.hw_limitation;
-stats->tx_errors += ring->stats.copy_bits_err;
-stats->tx_errors += ring->stats.skb2sgl_err;
-stats->tx_errors += ring->stats.map_sg_err;
-} else {
-stats->rx_bytes += ring->stats.rx_bytes;
-stats->rx_packets += ring->stats.rx_pkts;
-stats->rx_dropped += ring->stats.l2_err;
-stats->rx_errors += ring->stats.l2_err;
-stats->rx_errors += ring->stats.l3l4_csum_err;
-stats->rx_crc_errors += ring->stats.l2_err;
-stats->multicast += ring->stats.rx_multicast;
-stats->rx_length_errors += ring->stats.err_pkt_len;
-}
+ring_stats = ring->stats;
} while (u64_stats_fetch_retry(&ring->syncp, start));
+if (is_tx) {
+stats->tx_bytes += ring_stats.tx_bytes;
+stats->tx_packets += ring_stats.tx_pkts;
+stats->tx_dropped += ring_stats.sw_err_cnt;
+stats->tx_dropped += ring_stats.tx_vlan_err;
+stats->tx_dropped += ring_stats.tx_l4_proto_err;
+stats->tx_dropped += ring_stats.tx_l2l3l4_err;
+stats->tx_dropped += ring_stats.tx_tso_err;
+stats->tx_dropped += ring_stats.over_max_recursion;
+stats->tx_dropped += ring_stats.hw_limitation;
+stats->tx_dropped += ring_stats.copy_bits_err;
+stats->tx_dropped += ring_stats.skb2sgl_err;
+stats->tx_dropped += ring_stats.map_sg_err;
+stats->tx_errors += ring_stats.sw_err_cnt;
+stats->tx_errors += ring_stats.tx_vlan_err;
+stats->tx_errors += ring_stats.tx_l4_proto_err;
+stats->tx_errors += ring_stats.tx_l2l3l4_err;
+stats->tx_errors += ring_stats.tx_tso_err;
+stats->tx_errors += ring_stats.over_max_recursion;
+stats->tx_errors += ring_stats.hw_limitation;
+stats->tx_errors += ring_stats.copy_bits_err;
+stats->tx_errors += ring_stats.skb2sgl_err;
+stats->tx_errors += ring_stats.map_sg_err;
+} else {
+stats->rx_bytes += ring_stats.rx_bytes;
+stats->rx_packets += ring_stats.rx_pkts;
+stats->rx_dropped += ring_stats.l2_err;
+stats->rx_errors += ring_stats.l2_err;
+stats->rx_errors += ring_stats.l3l4_csum_err;
+stats->rx_crc_errors += ring_stats.l2_err;
+stats->multicast += ring_stats.rx_multicast;
+stats->rx_length_errors += ring_stats.err_pkt_len;
+}
static void hns3_nic_get_stats64(struct net_device *netdev,

View File

@@ -731,7 +731,7 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
-#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
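
The one-bit change above matters because the counter-number field is
seven bits wide and bit 20 is the separate NXT_STEP flag, which
GENMASK(20, 13) overlapped. Worked out:

#include <linux/bits.h>

/* GENMASK(h, l) sets bits l..h inclusive */
static_assert(GENMASK(20, 13) == 0x1fe000); /* 8 bits, clobbers bit 20 */
static_assert(GENMASK(19, 13) == 0x0fe000); /* 7 bits, the real field  */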

View File

@@ -5690,7 +5690,7 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
-action->counter_id);
+action->next_input_key);
req->ad_data = cpu_to_le64(ad_data);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);

View File

@@ -43,21 +43,12 @@ static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
-netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
-NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
-netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
-NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
napi_enable(&irq_cfg->napi);
}
static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
napi_disable(&irq_cfg->napi);
-netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
-NETDEV_QUEUE_TYPE_RX, NULL);
-netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
-NETDEV_QUEUE_TYPE_TX, NULL);
netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
netif_napi_del(&irq_cfg->napi);
}
@@ -150,6 +141,11 @@ int hinic3_qps_irq_init(struct net_device *netdev)
goto err_release_irqs;
}
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC3_SET_MSIX_AUTO_MASK);
@@ -164,6 +160,10 @@ err_release_irqs:
q_id--;
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_RX, NULL);
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
@@ -184,6 +184,10 @@ void hinic3_qps_irq_uninit(struct net_device *netdev)
for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_RX, NULL);
+netif_queue_set_napi(irq_cfg->netdev, q_id,
+NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,

View File

@@ -460,6 +460,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
ice_deinit_pf(pf);
+ice_deinit_hw(&pf->hw);
ice_deinit_dev(pf);
}

View File

@@ -979,6 +979,7 @@ void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);

View File

@@ -2251,7 +2251,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
-timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
+timeout = jiffies + 10 * usecs_to_jiffies(ICE_CTL_Q_SQ_CMD_TIMEOUT);
do {
status = ice_aq_release_res(hw, res, 0, NULL);
if (status != -EIO)

View File

@@ -3626,11 +3626,7 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
if (!lut)
return -ENOMEM;
-err = ice_get_rss_key(vsi, rxfh->key);
-if (err)
-goto out;
-err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size);
+err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
if (err)
goto out;

View File

@@ -398,6 +398,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+u64_stats_init(&ring_stats->syncp);
WRITE_ONCE(tx_ring_stats[i], ring_stats);
}
@@ -417,6 +419,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+u64_stats_init(&ring_stats->syncp);
WRITE_ONCE(rx_ring_stats[i], ring_stats);
}
@@ -3805,22 +3809,31 @@ int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+struct ice_pf *pf = vsi->back;
struct ice_vlan vlan;
int err;
-vlan = ICE_VLAN(0, 0, 0);
-err = vlan_ops->del_vlan(vsi, &vlan);
-if (err && err != -EEXIST)
-return err;
+if (pf->lag && pf->lag->primary) {
+dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting prune list\n");
+} else {
+vlan = ICE_VLAN(0, 0, 0);
+err = vlan_ops->del_vlan(vsi, &vlan);
+if (err && err != -EEXIST)
+return err;
+}
/* in SVM both VLAN 0 filters are identical */
if (!ice_is_dvm_ena(&vsi->back->hw))
return 0;
-vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
-err = vlan_ops->del_vlan(vsi, &vlan);
-if (err && err != -EEXIST)
-return err;
+if (pf->lag && pf->lag->primary) {
+dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting QinQ prune list\n");
+} else {
+vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+err = vlan_ops->del_vlan(vsi, &vlan);
+if (err && err != -EEXIST)
+return err;
+}
/* when deleting the last VLAN filter, make sure to disable the VLAN
* promisc mode so the filter isn't left by accident

View File

@@ -4836,6 +4836,7 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_dpll_deinit(pf);
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
xa_destroy(&pf->eswitch.reprs);
+ice_hwmon_exit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5437,8 +5438,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
-ice_hwmon_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -7988,6 +7987,34 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
return status;
}
+/**
+* ice_get_rss - Get RSS LUT and/or key
+* @vsi: Pointer to VSI structure
+* @seed: Buffer to store the key in
+* @lut: Buffer to store the lookup table entries
+* @lut_size: Size of buffer to store the lookup table entries
+*
+* Return: 0 on success, negative on failure
+*/
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+int err;
+if (seed) {
+err = ice_get_rss_key(vsi, seed);
+if (err)
+return err;
+}
+if (lut) {
+err = ice_get_rss_lut(vsi, lut, lut_size);
+if (err)
+return err;
+}
+return 0;
+}
/**
* ice_set_rss_hfunc - Set RSS HASH function
* @vsi: Pointer to VSI structure

View File

@@ -108,11 +108,11 @@ static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
ptp_read_system_prets(sts);
idpf_ptp_enable_shtime(adapter);
-lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
/* Read the system timestamp post PHC read */
ptp_read_system_postts(sts);
+lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
spin_unlock(&ptp->read_dev_clk_lock);

View File

@@ -3941,7 +3941,7 @@ static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
static void idpf_net_dim(struct idpf_q_vector *q_vector)
{
struct dim_sample dim_sample = { };
-u64 packets, bytes;
+u64 packets, bytes, pkts, bts;
u32 i;
if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
@@ -3953,9 +3953,12 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
-packets += u64_stats_read(&txq->q_stats.packets);
-bytes += u64_stats_read(&txq->q_stats.bytes);
+pkts = u64_stats_read(&txq->q_stats.packets);
+bts = u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
+packets += pkts;
+bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
@@ -3972,9 +3975,12 @@ check_rx_itr:
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
-packets += u64_stats_read(&rxq->q_stats.packets);
-bytes += u64_stats_read(&rxq->q_stats.bytes);
+pkts = u64_stats_read(&rxq->q_stats.packets);
+bts = u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+packets += pkts;
+bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,

View File

@@ -443,9 +443,10 @@
#define IGC_TXPBSIZE_DEFAULT ( \
IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+/* TSN value following I225/I226 SW User Manual Section 7.5.4 */
#define IGC_TXPBSIZE_TSN ( \
-IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
-IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
+IGC_TXPB0SIZE(5) | IGC_TXPB1SIZE(5) | IGC_TXPB2SIZE(5) | \
+IGC_TXPB3SIZE(5) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */

View File

@@ -1565,8 +1565,8 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
-/* Do not allow channel reconfiguration when mqprio is enabled */
-if (adapter->strict_priority_enable)
+/* Do not allow channel reconfiguration when any TSN qdisc is enabled */
+if (adapter->flags & IGC_FLAG_TSN_ANY_ENABLED)
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */

View File

@@ -7759,6 +7759,11 @@ int igc_reinit_queues(struct igc_adapter *adapter)
if (netif_running(netdev))
err = igc_open(netdev);
+if (!err) {
+/* Restore default IEEE 802.1Qbv schedule after queue reinit */
+igc_tsn_clear_schedule(adapter);
+}
return err;
}

View File

@@ -774,36 +774,43 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+u32 txstmpl_old;
u64 regval;
u32 mask;
int i;
+/* Establish baseline of TXSTMPL_0 before checking TXTT_0.
+* This baseline is used to detect if a new timestamp arrives in
+* register 0 during the hardware bug workaround below.
+*/
+txstmpl_old = rd32(IGC_TXSTMPL);
mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY;
if (mask & IGC_TSYNCTXCTL_TXTT_0) {
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
} else {
-/* There's a bug in the hardware that could cause
-* missing interrupts for TX timestamping. The issue
-* is that for new interrupts to be triggered, the
-* IGC_TXSTMPH_0 register must be read.
+/* TXTT_0 not set - register 0 has no new timestamp initially.
*
-* To avoid discarding a valid timestamp that just
-* happened at the "wrong" time, we need to confirm
-* that there was no timestamp captured, we do that by
-* assuming that no two timestamps in sequence have
-* the same nanosecond value.
+* Hardware bug: Future timestamp interrupts won't fire unless
+* TXSTMPH_0 is read, even if the timestamp was captured in
+* registers 1-3.
*
-* So, we read the "low" register, read the "high"
-* register (to latch a new timestamp) and read the
-* "low" register again, if "old" and "new" versions
-* of the "low" register are different, a valid
-* timestamp was captured, we can read the "high"
-* register again.
+* Workaround: Read TXSTMPH_0 here to enable future interrupts.
+* However, this read clears TXTT_0. If a timestamp arrives in
+* register 0 after checking TXTT_0 but before this read, it
+* would be lost.
*
+* To detect this race: We saved a baseline read of TXSTMPL_0
+* before TXTT_0 check. After performing the workaround read of
+* TXSTMPH_0, we read TXSTMPL_0 again. Since consecutive
+* timestamps never share the same nanosecond value, a change
+* between the baseline and new TXSTMPL_0 indicates a timestamp
+* arrived during the race window. If so, read the complete
+* timestamp.
*/
-u32 txstmpl_old, txstmpl_new;
+u32 txstmpl_new;
-txstmpl_old = rd32(IGC_TXSTMPL);
rd32(IGC_TXSTMPH);
txstmpl_new = rd32(IGC_TXSTMPL);
@@ -818,7 +825,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
done:
/* Now that the problematic first register was handled, we can
-* use retrieve the timestamps from the other registers
+* retrieve the timestamps from the other registers
* (starting from '1') with less complications.
*/
for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) {

View File

@@ -1551,8 +1551,8 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
return -ENODEV;
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
-int num_lfs, struct rsrc_attach *attach)
+static int rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1562,21 +1562,21 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
u64 cfg;
if (!num_lfs)
-return;
+return -EINVAL;
blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
-return;
+return -EFAULT;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
-return;
+return -ESRCH;
for (slot = 0; slot < num_lfs; slot++) {
/* Allocate the resource */
lf = rvu_alloc_rsrc(&block->lf);
if (lf < 0)
-return;
+return -EFAULT;
cfg = (1ULL << 63) | (pcifunc << 8) | slot;
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
@@ -1587,6 +1587,8 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
/* Set start MSIX vector for this LF within this PF/VF */
rvu_set_msix_offset(rvu, pfvf, block, lf);
}
+return 0;
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
@@ -1724,22 +1726,31 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
int err;
/* If first request, detach all existing attached resources */
-if (!attach->modify)
-rvu_detach_rsrcs(rvu, NULL, pcifunc);
+if (!attach->modify) {
+err = rvu_detach_rsrcs(rvu, NULL, pcifunc);
+if (err)
+return err;
+}
mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
if (err)
-goto exit;
+goto fail1;
/* Now attach the requested resources */
-if (attach->npalf)
-rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+if (attach->npalf) {
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+if (err)
+goto fail1;
+}
-if (attach->nixlf)
-rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+if (attach->nixlf) {
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+if (err)
+goto fail2;
+}
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1749,33 +1760,64 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
-rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
-attach->sso, attach);
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+attach->sso, attach);
+if (err)
+goto fail3;
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
-rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
-attach->ssow, attach);
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+attach->ssow, attach);
+if (err)
+goto fail4;
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
-rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
-attach->timlfs, attach);
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+attach->timlfs, attach);
+if (err)
+goto fail5;
}
if (attach->cptlfs) {
if (attach->modify &&
rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
-rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
-attach->cptlfs, attach);
+err = rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+attach->cptlfs, attach);
+if (err)
+goto fail6;
}
-exit:
mutex_unlock(&rvu->rsrc_lock);
+return 0;
+fail6:
+if (attach->timlfs)
+rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+fail5:
+if (attach->ssow)
+rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+fail4:
+if (attach->sso)
+rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+fail3:
+if (attach->nixlf)
+rvu_detach_block(rvu, pcifunc, BLKTYPE_NIX);
+fail2:
+if (attach->npalf)
+rvu_detach_block(rvu, pcifunc, BLKTYPE_NPA);
+fail1:
+mutex_unlock(&rvu->rsrc_lock);
return err;
}
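
The rvu rework above follows the standard kernel unwind-ladder idiom:
rvu_attach_block() now reports failure, and each successfully attached
block type gains a matching detach on the error path, in reverse order
of attachment. The generic shape (hypothetical names):

err = attach_a();
if (err)
	goto fail;
err = attach_b();
if (err)
	goto undo_a;
err = attach_c();
if (err)
	goto undo_b;
return 0;

undo_b:
detach_b();
undo_a:
detach_a();
fail:
return err;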

View File

@@ -1222,6 +1222,9 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
u8 cgx_idx, lmac;
void *cgxd;
+if (!rvu->fwdata)
+return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;

View File

@@ -56,7 +56,7 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
-if (rvu->fwdata->channel_data.valid) {
+if (rvu->fwdata && rvu->fwdata->channel_data.valid) {
sdp_pf_num[0] = 0;
pfvf = &rvu->pf[sdp_pf_num[0]];
pfvf->sdp_info = &rvu->fwdata->channel_data.info;

View File

@@ -328,7 +328,7 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
req->mask[0] = ~0ULL;
-req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
+req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;
req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
req->mask[1] = ~0ULL;

View File

@@ -940,13 +940,8 @@ static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
size_t offset, size_t size,
enum dma_data_direction dir)
{
-dma_addr_t iova;
-iova = dma_map_page_attrs(pfvf->dev, page,
+return dma_map_page_attrs(pfvf->dev, page,
offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
-if (unlikely(dma_mapping_error(pfvf->dev, iova)))
-return (dma_addr_t)NULL;
-return iova;
}
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,

View File

@@ -3249,7 +3249,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
-netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+NETDEV_XDP_ACT_NDO_XMIT |
+NETDEV_XDP_ACT_XSK_ZEROCOPY;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);

View File

@@ -4359,11 +4359,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
+bool set_ic, is_last_segment;
u32 pay_len, mss, queue;
int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
dma_addr_t des;
-bool set_ic;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4551,10 +4551,16 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_tx_timestamp(priv, first);
}
+/* If we only have one entry used, then the first entry is the last
+* segment.
+*/
+is_last_segment = ((tx_q->cur_tx - first_entry) &
+(priv->dma_conf.dma_tx_size - 1)) == 1;
/* Complete the first descriptor before granting the DMA */
stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
-tx_q->tx_skbuff_dma[first_entry].last_segment,
-hdr / 4, (skb->len - proto_hdr_len));
+is_last_segment, hdr / 4,
+skb->len - proto_hdr_len);
/* If context desc is used to change MSS */
if (mss_desc) {
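
The is_last_segment test added above uses power-of-two ring arithmetic:
with dma_tx_size a power of two, masking the difference of the ring
indices gives the number of descriptors consumed even across
wraparound, so a result of 1 means the first descriptor is also the
last. A worked sketch (assuming unsigned indices):

/* used = ring distance, mask = ring_size - 1 (ring_size power of two)
 * first_entry = 6, cur_tx = 7, size 8:  (7 - 6) & 7 = 1
 * first_entry = 7, cur_tx = 0 (wrap):   (0 - 7) & 7 = 1
 */
used = (cur_tx - first_entry) & (ring_size - 1);
is_last_segment = (used == 1);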

View File

@@ -70,7 +70,7 @@ int txgbe_test_hostif(struct wx *wx)
buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
-WX_HI_COMMAND_TIMEOUT, true);
+WX_HI_COMMAND_TIMEOUT, false);
}
int txgbe_read_eeprom_hostif(struct wx *wx,
@@ -148,7 +148,7 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
buffer.duplex = duplex;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
-WX_HI_COMMAND_TIMEOUT, true);
+WX_HI_COMMAND_TIMEOUT, false);
}
static void txgbe_get_link_capabilities(struct wx *wx, int *speed,

View File

@@ -69,7 +69,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
-spinlock_t addrs_lock;
};
struct ipvl_addr {
@@ -90,6 +89,7 @@ struct ipvl_port {
struct net_device *dev;
possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+spinlock_t addrs_lock; /* guards hash-table and addrs */
struct list_head ipvlans;
u16 mode;
u16 flags;

View File

@@ -107,17 +107,15 @@ void ipvlan_ht_addr_del(struct ipvl_addr *addr)
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
-struct ipvl_addr *addr, *ret = NULL;
+struct ipvl_addr *addr;
-rcu_read_lock();
-list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
-if (addr_equal(is_v6, addr, iaddr)) {
-ret = addr;
-break;
-}
+assert_spin_locked(&ipvlan->port->addrs_lock);
+list_for_each_entry(addr, &ipvlan->addrs, anode) {
+if (addr_equal(is_v6, addr, iaddr))
+return addr;
}
-rcu_read_unlock();
-return ret;
+return NULL;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)

View File

@@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+spin_lock_init(&port->addrs_lock);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
@@ -181,6 +182,7 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
+struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -189,10 +191,10 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
-rcu_read_lock();
-list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+spin_lock_bh(&port->addrs_lock);
+list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
-rcu_read_unlock();
+spin_unlock_bh(&port->addrs_lock);
return 0;
}
@@ -206,10 +208,10 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
-rcu_read_lock();
-list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+spin_lock_bh(&ipvlan->port->addrs_lock);
+list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
-rcu_read_unlock();
+spin_unlock_bh(&ipvlan->port->addrs_lock);
return 0;
}
@@ -579,7 +581,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
-spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -657,13 +658,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
-spin_lock_bh(&ipvlan->addrs_lock);
+spin_lock_bh(&ipvlan->port->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
-spin_unlock_bh(&ipvlan->addrs_lock);
+spin_unlock_bh(&ipvlan->port->addrs_lock);
ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -817,6 +818,8 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
+assert_spin_locked(&ipvlan->port->addrs_lock);
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
@@ -847,16 +850,16 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
-spin_lock_bh(&ipvlan->addrs_lock);
+spin_lock_bh(&ipvlan->port->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
-spin_unlock_bh(&ipvlan->addrs_lock);
+spin_unlock_bh(&ipvlan->port->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
-spin_unlock_bh(&ipvlan->addrs_lock);
+spin_unlock_bh(&ipvlan->port->addrs_lock);
kfree_rcu(addr, rcu);
}
@@ -878,14 +881,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
-spin_lock_bh(&ipvlan->addrs_lock);
+spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
-spin_unlock_bh(&ipvlan->addrs_lock);
+spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -924,21 +927,24 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
NL_SET_ERR_MSG(i6vi->extack,
"Address already assigned to an ipvlan device");
-return notifier_from_errno(-EADDRINUSE);
+ret = notifier_from_errno(-EADDRINUSE);
}
+spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
-return NOTIFY_OK;
+return ret;
}
#endif
@@ -946,14 +952,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
-spin_lock_bh(&ipvlan->addrs_lock);
+spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
-spin_unlock_bh(&ipvlan->addrs_lock);
+spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -995,21 +1001,24 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct in_validator_info *ivi = (struct in_validator_info *)ptr;
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
NL_SET_ERR_MSG(ivi->extack,
"Address already assigned to an ipvlan device");
-return notifier_from_errno(-EADDRINUSE);
+ret = notifier_from_errno(-EADDRINUSE);
}
+spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
-return NOTIFY_OK;
+return ret;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
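
The ipvlan rework moves addrs_lock from each slave device into the
shared port because a lock must live with the data it guards: the
address hash table belongs to the port and is modified through every
slave, so per-slave locks serialized nothing. A distilled sketch of the
resulting scheme (generic names; hash() and struct addr_entry are
hypothetical):

struct port {
	spinlock_t addrs_lock;  /* guards hlhead[] and all slaves' lists */
	struct hlist_head hlhead[HASH_SIZE];
};

struct slave {
	struct port *port;
	struct list_head addrs; /* guarded by port->addrs_lock */
};

static void slave_add_addr(struct slave *s, struct addr_entry *a)
{
	spin_lock_bh(&s->port->addrs_lock);
	list_add(&a->anode, &s->addrs);
	hlist_add_head(&a->hnode, &s->port->hlhead[hash(a)]);
	spin_unlock_bh(&s->port->addrs_lock);
}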

View File

@@ -244,7 +244,9 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+mutex_lock(&nsim_dev->progs_list_lock);
list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
+mutex_unlock(&nsim_dev->progs_list_lock);
prog->aux->offload->dev_priv = state;
@@ -273,12 +275,16 @@ static int nsim_bpf_translate(struct bpf_prog *prog)
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
+struct nsim_dev *nsim_dev;
state = prog->aux->offload->dev_priv;
+nsim_dev = state->nsim_dev;
WARN(state->is_loaded,
"offload state destroyed while program still bound");
debugfs_remove_recursive(state->ddir);
+mutex_lock(&nsim_dev->progs_list_lock);
list_del(&state->l);
+mutex_unlock(&nsim_dev->progs_list_lock);
kfree(state);
}

View File

@@ -1647,6 +1647,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
+mutex_init(&nsim_dev->progs_list_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
@@ -1785,6 +1786,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
+mutex_destroy(&nsim_dev->progs_list_lock);
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);

View File

@@ -324,6 +324,7 @@ struct nsim_dev {
u32 prog_id_gen;
struct list_head bpf_bound_progs;
struct list_head bpf_bound_maps;
+struct mutex progs_list_lock;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
bool fw_update_status;

View File

@@ -93,12 +93,10 @@ static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
{
switch (interface) {
case PHY_INTERFACE_MODE_1000BASEX:
-case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+case PHY_INTERFACE_MODE_2500BASEX:
+return LINK_INBAND_DISABLE;
default:
return 0;
}

View File

@@ -277,7 +277,7 @@ static int xway_gphy_init_leds(struct phy_device *phydev)
static int xway_gphy_config_init(struct phy_device *phydev)
{
-struct device_node *np = phydev->mdio.dev.of_node;
+struct device_node *np;
int err;
/* Mask all interrupts */
@@ -286,7 +286,10 @@ static int xway_gphy_config_init(struct phy_device *phydev)
return err;
/* Use default LED configuration if 'leds' node isn't defined */
-if (!of_get_child_by_name(np, "leds"))
+np = of_get_child_by_name(phydev->mdio.dev.of_node, "leds");
+if (np)
+of_node_put(np);
+else
xway_gphy_init_leds(phydev);
/* Clear all pending interrupts */

View File

@@ -519,6 +519,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
SFP_QUIRK_F("H-COM", "SPP425H-GAB4", sfp_fixup_potron),
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in their EERPOM
SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),

View File

@@ -603,10 +603,6 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
-{
-USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
-.driver_info = (unsigned long)&dm9601_info,
-},
{
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,

View File

@@ -1821,9 +1821,12 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_NOARP) != 0)
net->flags |= IFF_NOARP;
/* maybe the remote can't receive an Ethernet MTU */
-if (net->mtu > (dev->hard_mtu - net->hard_header_len))
-net->mtu = dev->hard_mtu - net->hard_header_len;
+if (net->max_mtu > (dev->hard_mtu - net->hard_header_len))
+net->max_mtu = dev->hard_mtu - net->hard_header_len;
+if (net->mtu > net->max_mtu)
+net->mtu = net->max_mtu;
} else if (!info->in || !info->out)
status = usbnet_get_endpoints(dev, udev);
else {
@@ -1984,6 +1987,7 @@ int usbnet_resume(struct usb_interface *intf)
} else {
netif_trans_update(dev->net);
__skb_queue_tail(&dev->txq, skb);
+netdev_sent_queue(dev->net, skb->len);
}
}
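
The usbnet crash fix restores Byte Queue Limits accounting on resume:
every byte reported through netdev_sent_queue() must eventually be
balanced by netdev_completed_queue() (or wiped by a queue reset), so
requeueing a deferred skb after resume without re-accounting it leaves
the BQL counters skewed and the completion path trips over the
imbalance. The invariant, in sketch form (generic driver; hw_enqueue()
is hypothetical):

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	hw_enqueue(skb);                /* hand the packet to hardware */
	netdev_sent_queue(dev, len);    /* BQL: bytes submitted */
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int pkts,
			   unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);  /* BQL: bytes done */
}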

View File

@@ -228,16 +228,20 @@ static void veth_get_ethtool_stats(struct net_device *dev,
const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
const void *base = (void *)&rq_stats->vs;
unsigned int start, tx_idx = idx;
+u64 buf[VETH_TQ_STATS_LEN];
size_t offset;
-tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
do {
start = u64_stats_fetch_begin(&rq_stats->syncp);
for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
offset = veth_tq_stats_desc[j].offset;
-data[tx_idx + j] += *(u64 *)(base + offset);
+buf[j] = *(u64 *)(base + offset);
}
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
+tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+for (j = 0; j < VETH_TQ_STATS_LEN; j++)
+data[tx_idx + j] += buf[j];
}
pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;

View File

@@ -1727,8 +1727,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
-ce_state->src_ring->base_addr_owner_space,
-ce_state->src_ring->base_addr_ce_space);
+ce_state->src_ring->base_addr_owner_space_unaligned,
+ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1737,8 +1737,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
-ce_state->dest_ring->base_addr_owner_space,
-ce_state->dest_ring->base_addr_ce_space);
+ce_state->dest_ring->base_addr_owner_space_unaligned,
+ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}
@@ -1758,8 +1758,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
-ce_state->src_ring->base_addr_owner_space,
-ce_state->src_ring->base_addr_ce_space);
+ce_state->src_ring->base_addr_owner_space_unaligned,
+ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1768,8 +1768,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
-ce_state->dest_ring->base_addr_owner_space,
-ce_state->dest_ring->base_addr_ce_space);
+ce_state->dest_ring->base_addr_owner_space_unaligned,
+ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}

View File

@@ -984,8 +984,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
-pipe->src_ring->base_addr_owner_space,
-pipe->src_ring->base_addr_ce_space);
+pipe->src_ring->base_addr_owner_space_unaligned,
+pipe->src_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
@@ -995,8 +995,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
-pipe->dest_ring->base_addr_owner_space,
-pipe->dest_ring->base_addr_ce_space);
+pipe->dest_ring->base_addr_owner_space_unaligned,
+pipe->dest_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
@@ -1007,8 +1007,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
-pipe->status_ring->base_addr_owner_space,
-pipe->status_ring->base_addr_ce_space);
+pipe->status_ring->base_addr_owner_space_unaligned,
+pipe->status_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}

View File

@@ -5495,7 +5495,8 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
for_each_set_bit(link_id, &links_map, ATH12K_NUM_MAX_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
-if (!arvif || arvif->is_started)
+if (!arvif || !arvif->is_created ||
+arvif->ar->scan.arvif != arvif)
continue;
ar = arvif->ar;
@@ -9172,7 +9173,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
} else {
-link_id = 0;
+if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+link_id = ATH12K_FIRST_SCAN_LINK;
+else
+link_id = 0;
}
arvif = rcu_dereference(ahvif->link[link_id]);
@@ -12142,6 +12146,9 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
if (drop)
return;
+for_each_ar(ah, ar, i)
+wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
/* vif can be NULL when flush() is considered for hw */
if (!vif) {
for_each_ar(ah, ar, i)
@@ -12149,9 +12156,6 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
return;
}
-for_each_ar(ah, ar, i)
-wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
ahvif = ath12k_vif_to_ahvif(vif);
links = ahvif->links_map;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
@@ -13343,7 +13347,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
-wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+wiphy_work_flush(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}

View File

@@ -6575,16 +6575,9 @@ static int freq_to_idx(struct ath12k *ar, int freq)
if (!sband)
continue;
-for (ch = 0; ch < sband->n_channels; ch++, idx++) {
-if (sband->channels[ch].center_freq <
-KHZ_TO_MHZ(ar->freq_range.start_freq) ||
-sband->channels[ch].center_freq >
-KHZ_TO_MHZ(ar->freq_range.end_freq))
-continue;
+for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
-}
}
exit:

View File

@@ -825,7 +825,7 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
bool coex_flag)
{
-u8 i;
+u8 i, j;
u32 rx_win_size;
struct mwifiex_private *priv;
@@ -863,8 +863,8 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
if (rx_win_size != priv->add_ba_param.rx_win_size) {
if (!priv->media_connected)
continue;
-for (i = 0; i < MAX_NUM_TID; i++)
-mwifiex_11n_delba(priv, i);
+for (j = 0; j < MAX_NUM_TID; j++)
+mwifiex_11n_delba(priv, j);
}
}
}
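
The mwifiex change fixes a textbook reused-loop-counter bug: the inner
delba loop recycled the outer per-interface iterator i, destroying the
outer loop's position after the first match. Distilled:

/* Before: the inner loop clobbers the outer iterator */
for (i = 0; i < n_priv; i++) {
	/* ... per-interface checks ... */
	for (i = 0; i < MAX_NUM_TID; i++)   /* BUG: same 'i' */
		mwifiex_11n_delba(priv, i);
}
/* After: the inner loop gets its own counter 'j' */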

View File

@@ -2035,6 +2035,7 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->queues = MAX_HW_QUEUES;
hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
hw->vif_data_size = sizeof(struct vif_priv);
hw->max_rates = 1;
hw->max_rate_tries = MAX_RETRIES;

View File

@@ -78,9 +78,8 @@ struct mhi_mbim_context {
struct mbim_tx_hdr {
struct usb_cdc_ncm_nth16 nth16;
-/* Must be last as it ends in a flexible-array member. */
struct usb_cdc_ncm_ndp16 ndp16;
+struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
@@ -108,20 +107,20 @@ static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
-DEFINE_RAW_FLEX(struct mbim_tx_hdr, mbim_hdr, ndp16.dpe16, 2);
unsigned int dgram_size = skb->len;
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
+struct mbim_tx_hdr *mbim_hdr;
/* Only one NDP is sent, containing the IP packet (no aggregation) */
/* Ensure we have enough headroom for crafting MBIM header */
-if (skb_cow_head(skb, __struct_size(mbim_hdr))) {
+if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
dev_kfree_skb_any(skb);
return NULL;
}
-mbim_hdr = skb_push(skb, __struct_size(mbim_hdr));
+mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
/* Fill NTB header */
nth16 = &mbim_hdr->nth16;
@@ -134,11 +133,12 @@ static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
/* Fill the unique NDP */
ndp16 = &mbim_hdr->ndp16;
ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
-ndp16->wLength = cpu_to_le16(struct_size(ndp16, dpe16, 2));
+ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
++ sizeof(struct usb_cdc_ncm_dpe16) * 2);
ndp16->wNextNdpIndex = 0;
/* Datagram follows the mbim header */
-ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(__struct_size(mbim_hdr));
+ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
/* null termination */
@@ -584,8 +584,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
ndev->type = ARPHRD_RAWIP;
ndev->needed_headroom =
struct_size_t(struct mbim_tx_hdr, ndp16.dpe16, 2);
ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
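
As a cross-check on the restored sizing above, the header arithmetic can be reproduced in a standalone userspace sketch. The struct layouts below are abbreviated stand-ins for the real usb_cdc_ncm_* definitions (field names invented, sizes per the CDC NCM layout):

#include <stdint.h>
#include <stdio.h>

struct nth16 { uint32_t sig; uint16_t len, seq, blk_len, ndp_idx; } __attribute__((packed)); /* 12 bytes */
struct ndp16 { uint32_t sig; uint16_t len, next; } __attribute__((packed));                  /*  8 bytes */
struct dpe16 { uint16_t idx, len; } __attribute__((packed));                                 /*  4 bytes */

struct tx_hdr {
	struct nth16 nth;
	struct ndp16 ndp;
	struct dpe16 dpe[2];	/* one datagram entry plus the null terminator */
} __attribute__((packed));

int main(void)
{
	/* matches ndp16->wLength in the restored code: NDP header + 2 entries */
	printf("ndp wLength    = %zu\n", sizeof(struct ndp16) + 2 * sizeof(struct dpe16)); /* 16 */
	/* matches wDatagramIndex/needed_headroom: payload follows the whole header */
	printf("datagram index = %zu\n", sizeof(struct tx_hdr));                           /* 28 */
	return 0;
}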


@@ -125,10 +125,6 @@ static ssize_t virtual_ncidev_write(struct file *file,
 		kfree_skb(skb);
 		return -EFAULT;
 	}

-	if (strnlen(skb->data, count) != count) {
-		kfree_skb(skb);
-		return -EINVAL;
-	}

 	nci_recv_frame(vdev->ndev, skb);
 	return count;


@@ -3221,8 +3221,6 @@ struct cfg80211_auth_request {
  *	if this is %NULL for a link, that link is not requested
  * @elems: extra elements for the per-STA profile for this link
  * @elems_len: length of the elements
- * @disabled: If set this link should be included during association etc. but it
- *	should not be used until enabled by the AP MLD.
  * @error: per-link error code, must be <= 0. If there is an error, then the
  *	operation as a whole must fail.
  */
@@ -3230,7 +3228,6 @@ struct cfg80211_assoc_link {
 	struct cfg80211_bss *bss;
 	const u8 *elems;
 	size_t elems_len;
-	bool disabled;
 	int error;
 };


@@ -322,6 +322,7 @@
	EM(rxrpc_call_put_kernel,		"PUT kernel ") \
	EM(rxrpc_call_put_poke,			"PUT poke ") \
	EM(rxrpc_call_put_recvmsg,		"PUT recvmsg ") \
+	EM(rxrpc_call_put_recvmsg_peek_nowait,	"PUT peek-nwt") \
	EM(rxrpc_call_put_release_recvmsg_q,	"PUT rls-rcmq") \
	EM(rxrpc_call_put_release_sock,		"PUT rls-sock") \
	EM(rxrpc_call_put_release_sock_tba,	"PUT rls-sk-a") \
@@ -340,6 +341,9 @@
	EM(rxrpc_call_see_input,		"SEE input ") \
	EM(rxrpc_call_see_notify_released,	"SEE nfy-rlsd") \
	EM(rxrpc_call_see_recvmsg,		"SEE recvmsg ") \
+	EM(rxrpc_call_see_recvmsg_requeue,	"SEE recv-rqu") \
+	EM(rxrpc_call_see_recvmsg_requeue_first, "SEE recv-rqF") \
+	EM(rxrpc_call_see_recvmsg_requeue_move,	"SEE recv-rqM") \
	EM(rxrpc_call_see_release,		"SEE release ") \
	EM(rxrpc_call_see_userid_exists,	"SEE u-exists") \
	EM(rxrpc_call_see_waiting_call,		"SEE q-conn ") \


@@ -2880,8 +2880,9 @@ enum nl80211_commands {
  *	index. If the userspace includes more RNR elements than number of
  *	MBSSID elements then these will be added in every EMA beacon.
  *
- * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
- *	disabled.
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Unused. It was used to indicate that a link
+ *	is disabled during association. However, the AP will send the
+ *	information by including a TTLM in the association response.
  *
  * @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
  *	include BSSes that can only be used in restricted scenarios and/or


@@ -1312,14 +1312,15 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
 	has_mac = skb_mac_header_was_set(skb);
 	has_trans = skb_transport_header_was_set(skb);

-	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
-	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
+	printk("%sskb len=%u data_len=%u headroom=%u headlen=%u tailroom=%u\n"
+	       "end-tail=%u mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
 	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
 	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
 	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
 	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
 	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
-	       level, skb->len, headroom, skb_headlen(skb), tailroom,
+	       level, skb->len, skb->data_len, headroom, skb_headlen(skb),
+	       tailroom, skb->end - skb->tail,
 	       has_mac ? skb->mac_header : -1,
 	       has_mac ? skb_mac_header_len(skb) : -1,
 	       skb->mac_len,

@@ -158,7 +158,7 @@ unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
 	bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
 					DSA_MAX_NUM_OFFLOADING_BRIDGES,
 					1);
-	if (bridge_num >= max)
+	if (bridge_num > max)
 		return 0;

 	set_bit(bridge_num, &dsa_fwd_offloading_bridges);


@@ -215,6 +215,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 		return gue_control_message(skb, guehdr);

 	proto_ctype = guehdr->proto_ctype;
+	if (unlikely(!proto_ctype))
+		goto drop;
+
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);


@@ -15,7 +15,7 @@
 const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_BE16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
-	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
+	[FOU_ATTR_IPPROTO] = NLA_POLICY_MIN(NLA_U8, 1),
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
	[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
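
The NLA_POLICY_MIN() form makes the netlink core reject a zero ipproto during attribute validation, before the request handler runs, matching the min: 1 check added to the YAML spec. A hand-rolled equivalent of what the policy now enforces would look roughly like this (sketch only, not the generated code):

static int fou_ipproto_ok(struct nlattr **attrs)
{
	/* with NLA_POLICY_MIN(NLA_U8, 1) in the policy table, the core
	 * performs this range check itself during validation */
	if (attrs[FOU_ATTR_IPPROTO] &&
	    nla_get_u8(attrs[FOU_ATTR_IPPROTO]) < 1)
		return -EINVAL;
	return 0;
}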


@@ -1555,8 +1555,8 @@ skip_routeinfo:
 		memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
 		mtu = ntohl(n);

-		if (in6_dev->ra_mtu != mtu) {
-			in6_dev->ra_mtu = mtu;
+		if (READ_ONCE(in6_dev->ra_mtu) != mtu) {
+			WRITE_ONCE(in6_dev->ra_mtu, mtu);
 			send_ifinfo_notify = true;
 		}
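
The change above is the standard KCSAN-friendly pattern: a field read without the writer's lock gets READ_ONCE(), and every store it races with gets WRITE_ONCE(), so the compiler cannot tear, fuse, or re-read the accesses. A userspace sketch of the idiom (the macro definitions approximate the kernel's, struct name invented):

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct ra_state {
	unsigned int ra_mtu;	/* written here, read locklessly elsewhere */
};

static void ra_update_mtu(struct ra_state *s, unsigned int mtu)
{
	if (READ_ONCE(s->ra_mtu) != mtu)	/* annotated racy load */
		WRITE_ONCE(s->ra_mtu, mtu);	/* annotated racy store */
}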


@@ -1086,8 +1086,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	tunnel = session->tunnel;

 	/* Check protocol version */
-	if (version != tunnel->version)
+	if (version != tunnel->version) {
+		l2tp_session_put(session);
 		goto invalid;
+	}

 	if (version == L2TP_HDR_VER_3 &&
 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
@@ -1414,8 +1416,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 {
 	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
 						  del_work);
-	struct sock *sk = tunnel->sock;
-	struct socket *sock = sk->sk_socket;

 	l2tp_tunnel_closeall(tunnel);
@@ -1423,6 +1423,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 	 * the sk API to release it here.
 	 */
 	if (tunnel->fd < 0) {
+		struct socket *sock = tunnel->sock->sk_socket;
+
 		if (sock) {
 			kernel_sock_shutdown(sock, SHUT_RDWR);
 			sock_release(sock);


@@ -451,8 +451,6 @@ struct ieee80211_mgd_assoc_data {
 		struct ieee80211_conn_settings conn;
 		u16 status;
-		bool disabled;
 	} link[IEEE80211_MLD_MAX_NUM_LINKS];

 	u8 ap_addr[ETH_ALEN] __aligned(2);


@@ -350,6 +350,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 	/* we hold the RTNL here so can safely walk the list */
 	list_for_each_entry(nsdata, &local->interfaces, list) {
 		if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
+			struct ieee80211_link_data *link;
+
 			/*
 			 * Only OCB and monitor mode may coexist
 			 */
@@ -376,8 +378,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 			 * will not add another interface while any channel
 			 * switch is active.
 			 */
-			if (nsdata->vif.bss_conf.csa_active)
-				return -EBUSY;
+			for_each_link_data(nsdata, link) {
+				if (link->conf->csa_active)
+					return -EBUSY;
+			}

 			/*
 			 * The remaining checks are only performed for interfaces


@@ -987,7 +987,8 @@ void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata)
 	if (ieee80211_sdata_running(sdata)) {
 		list_for_each_entry(key, &sdata->key_list, list) {
-			increment_tailroom_need_count(sdata);
+			if (!(key->flags & KEY_FLAG_TAINTED))
+				increment_tailroom_need_count(sdata);
 			ieee80211_key_enable_hw_accel(key);
 		}
 	}


@@ -6161,6 +6161,98 @@ static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies,
 	return true;
 }

+static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
+{
+	if (bm_size == 1)
+		return *data;
+	return get_unaligned_le16(data);
+}
+
+static int
+ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata,
+			const struct ieee80211_ttlm_elem *ttlm,
+			struct ieee80211_adv_ttlm_info *ttlm_info)
+{
+	/* The element size was already validated in
+	 * ieee80211_tid_to_link_map_size_ok()
+	 */
+	u8 control, link_map_presence, map_size, tid;
+	u8 *pos;
+
+	memset(ttlm_info, 0, sizeof(*ttlm_info));
+	pos = (void *)ttlm->optional;
+	control = ttlm->control;
+
+	if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) !=
+	    IEEE80211_TTLM_DIRECTION_BOTH) {
+		sdata_info(sdata, "Invalid advertised T2L map direction\n");
+		return -EINVAL;
+	}
+
+	link_map_presence = *pos;
+	pos++;
+
+	if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) {
+		ttlm_info->switch_time = get_unaligned_le16(pos);
+
+		/* Since ttlm_info->switch_time == 0 means no switch time, bump
+		 * it by 1.
+		 */
+		if (!ttlm_info->switch_time)
+			ttlm_info->switch_time = 1;
+
+		pos += 2;
+	}
+
+	if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) {
+		ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16;
+		pos += 3;
+	}
+
+	if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+		ttlm_info->map = 0xffff;
+		return 0;
+	}
+
+	if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+		map_size = 1;
+	else
+		map_size = 2;
+
+	/* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall
+	 * not advertise a TID-to-link mapping that does not map all TIDs to the
+	 * same link set, reject frame if not all links have mapping
+	 */
+	if (link_map_presence != 0xff) {
+		sdata_info(sdata,
+			   "Invalid advertised T2L mapping presence indicator\n");
+		return -EINVAL;
+	}
+
+	ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
+	if (!ttlm_info->map) {
+		sdata_info(sdata,
+			   "Invalid advertised T2L map for TID 0\n");
+		return -EINVAL;
+	}
+
+	pos += map_size;
+
+	for (tid = 1; tid < 8; tid++) {
+		u16 map = ieee80211_get_ttlm(map_size, pos);
+
+		if (map != ttlm_info->map) {
+			sdata_info(sdata, "Invalid advertised T2L map for tid %d\n",
+				   tid);
+			return -EINVAL;
+		}
+
+		pos += map_size;
+	}
+
+	return 0;
+}
+
 static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    struct ieee802_11_elems *elems,
@@ -6192,8 +6284,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 			continue;

 		valid_links |= BIT(link_id);
-		if (assoc_data->link[link_id].disabled)
-			dormant_links |= BIT(link_id);

 		if (link_id != assoc_data->assoc_link_id) {
 			err = ieee80211_sta_allocate_link(sta, link_id);
@@ -6202,6 +6292,33 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 			}
 		}

+		/*
+		 * We do not support setting a negotiated TTLM during
+		 * association. As such, we can assume that if there is a TTLM,
+		 * then it is the currently active advertised TTLM.
+		 * In that case, there must be exactly one TTLM that does not
+		 * have a switch time set. This mapping should also leave us
+		 * with at least one usable link.
+		 */
+		if (elems->ttlm_num > 1) {
+			sdata_info(sdata,
+				   "More than one advertised TTLM in association response\n");
+			goto out_err;
+		} else if (elems->ttlm_num == 1) {
+			if (ieee80211_parse_adv_t2l(sdata, elems->ttlm[0],
+						    &sdata->u.mgd.ttlm_info) ||
+			    sdata->u.mgd.ttlm_info.switch_time != 0 ||
+			    !(valid_links & sdata->u.mgd.ttlm_info.map)) {
+				sdata_info(sdata,
+					   "Invalid advertised TTLM in association response\n");
+				goto out_err;
+			}
+
+			sdata->u.mgd.ttlm_info.active = true;
+			dormant_links =
+				valid_links & ~sdata->u.mgd.ttlm_info.map;
+		}
+
 		ieee80211_vif_set_links(sdata, valid_links, dormant_links);
 	}
@@ -6992,95 +7109,6 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
 	sdata->u.mgd.ttlm_info.switch_time = 0;
 }

-static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
-{
-	if (bm_size == 1)
-		return *data;
-	else
-		return get_unaligned_le16(data);
-}
-
-static int
-ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata,
-			const struct ieee80211_ttlm_elem *ttlm,
-			struct ieee80211_adv_ttlm_info *ttlm_info)
-{
-	/* The element size was already validated in
-	 * ieee80211_tid_to_link_map_size_ok()
-	 */
-	u8 control, link_map_presence, map_size, tid;
-	u8 *pos;
-
-	memset(ttlm_info, 0, sizeof(*ttlm_info));
-	pos = (void *)ttlm->optional;
-	control = ttlm->control;
-
-	if ((control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) ||
-	    !(control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT))
-		return 0;
-
-	if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) !=
-	    IEEE80211_TTLM_DIRECTION_BOTH) {
-		sdata_info(sdata, "Invalid advertised T2L map direction\n");
-		return -EINVAL;
-	}
-
-	link_map_presence = *pos;
-	pos++;
-
-	ttlm_info->switch_time = get_unaligned_le16(pos);
-
-	/* Since ttlm_info->switch_time == 0 means no switch time, bump it
-	 * by 1.
-	 */
-	if (!ttlm_info->switch_time)
-		ttlm_info->switch_time = 1;
-
-	pos += 2;
-
-	if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) {
-		ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16;
-		pos += 3;
-	}
-
-	if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
-		map_size = 1;
-	else
-		map_size = 2;
-
-	/* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall
-	 * not advertise a TID-to-link mapping that does not map all TIDs to the
-	 * same link set, reject frame if not all links have mapping
-	 */
-	if (link_map_presence != 0xff) {
-		sdata_info(sdata,
-			   "Invalid advertised T2L mapping presence indicator\n");
-		return -EINVAL;
-	}
-
-	ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
-	if (!ttlm_info->map) {
-		sdata_info(sdata,
-			   "Invalid advertised T2L map for TID 0\n");
-		return -EINVAL;
-	}
-
-	pos += map_size;
-
-	for (tid = 1; tid < 8; tid++) {
-		u16 map = ieee80211_get_ttlm(map_size, pos);
-
-		if (map != ttlm_info->map) {
-			sdata_info(sdata, "Invalid advertised T2L map for tid %d\n",
-				   tid);
-			return -EINVAL;
-		}
-
-		pos += map_size;
-	}
-
-	return 0;
-}
-
 static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
					struct ieee802_11_elems *elems,
					u64 beacon_ts)
@@ -9737,7 +9765,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
						      req, true, i,
						      &assoc_data->link[i].conn);
 		assoc_data->link[i].bss = link_cbss;
-		assoc_data->link[i].disabled = req->links[i].disabled;

 		if (!bss->uapsd_supported)
 			uapsd_supported = false;
@@ -10719,8 +10746,6 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
						      &data->link[link_id].conn);
 		data->link[link_id].bss = link_cbss;
-		data->link[link_id].disabled =
-			req->add_links[link_id].disabled;
 		data->link[link_id].elems =
 			(u8 *)req->add_links[link_id].elems;
 		data->link[link_id].elems_len =


@@ -347,8 +347,13 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
			    mgmt->da))
 			return;
 	} else {
-		/* Beacons are expected only with broadcast address */
-		if (!is_broadcast_ether_addr(mgmt->da))
+		/*
+		 * Non-S1G beacons are expected only with broadcast address.
+		 * S1G beacons only carry the SA so no DA check is required
+		 * nor possible.
+		 */
+		if (!ieee80211_is_s1g_beacon(mgmt->frame_control) &&
+		    !is_broadcast_ether_addr(mgmt->da))
 			return;
 	}


@@ -752,7 +752,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	unsigned char *dptr;
 	ax25_cb *ax25s;
 	int ret;
-	struct sk_buff *skbn;
+	struct sk_buff *nskb, *oskb;

 	/*
	 * Reject malformed packets early. Check that it contains at least 2
@@ -811,14 +811,16 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
-	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
+	nskb = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
+	if (!nskb) {
 		nr_node_unlock(nr_node);
 		nr_node_put(nr_node);
 		dev_put(dev);
 		return 0;
 	}
-	kfree_skb(skb);
-	skb=skbn;
+	oskb = skb;
+	skb = nskb;

 	skb->data[14]--;
 	dptr = skb_push(skb, 1);
@@ -837,6 +839,9 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	nr_node_unlock(nr_node);
 	nr_node_put(nr_node);

+	if (ret)
+		kfree_skb(oskb);
+
 	return ret;
 }


@@ -310,22 +310,23 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
  */
 int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb)
 {
+	u64 tx_success = 0, tx_fail = 0;
 	struct nlattr *nla;
 	int i;

-	__u64 tx_success = 0;
-	__u64 tx_fail = 0;
-
 	for_each_possible_cpu(i) {
 		const struct vport_upcall_stats_percpu *stats;
+		u64 n_success, n_fail;
 		unsigned int start;

 		stats = per_cpu_ptr(vport->upcall_stats, i);
 		do {
 			start = u64_stats_fetch_begin(&stats->syncp);
-			tx_success += u64_stats_read(&stats->n_success);
-			tx_fail += u64_stats_read(&stats->n_fail);
+			n_success = u64_stats_read(&stats->n_success);
+			n_fail = u64_stats_read(&stats->n_fail);
 		} while (u64_stats_fetch_retry(&stats->syncp, start));
+		tx_success += n_success;
+		tx_fail += n_fail;
 	}

 	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
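
The fix follows the general rule for u64_stats read sections: the begin/retry loop can execute more than once when a writer interleaves, so its body must be side-effect free, and totals may only be accumulated once the snapshot is known to be consistent. Distilled into a generic sketch (the counter layout is hypothetical):

struct pcpu_counter {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

static u64 sum_packets(struct pcpu_counter __percpu *counters)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_counter *c = per_cpu_ptr(counters, cpu);
		unsigned int start;
		u64 snap;

		do {
			start = u64_stats_fetch_begin(&c->syncp);
			snap = u64_stats_read(&c->packets);	/* no side effects in here */
		} while (u64_stats_fetch_retry(&c->syncp, start));

		total += snap;	/* accumulate only a consistent snapshot */
	}
	return total;
}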


@@ -387,7 +387,7 @@ struct rxrpc_peer {
 	struct rb_root		service_conns;	/* Service connections */
 	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
 	unsigned long		app_data;	/* Application data (e.g. afs_server) */
-	time64_t		last_tx_at;	/* Last time packet sent here */
+	unsigned int		last_tx_at;	/* Last time packet sent here (time64_t LSW) */
 	seqlock_t		service_conn_lock;
 	spinlock_t		lock;		/* access lock */
 	int			debug_id;	/* debug ID for printks */
@@ -1379,6 +1379,13 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
 void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t acked_serial,
				 bool sendmsg_fail);

+/* Update the last transmission time on a peer for keepalive purposes. */
+static inline void rxrpc_peer_mark_tx(struct rxrpc_peer *peer)
+{
+	/* To avoid tearing on 32-bit systems, we only keep the LSW. */
+	WRITE_ONCE(peer->last_tx_at, ktime_get_seconds());
+}
+
 /*
  * peer_object.c
  */


@@ -194,7 +194,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	}

 	ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);
 	if (ret < 0)
 		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);


@@ -275,7 +275,7 @@ static void rxrpc_send_ack_packet(struct rxrpc_call *call, int nr_kv, size_t len
 	rxrpc_local_dont_fragment(conn->local, why == rxrpc_propose_ack_ping_for_mtu_probe);

 	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
-	call->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(call->peer);
 	if (ret < 0) {
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
@@ -411,7 +411,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));

 	ret = do_udp_sendmsg(conn->local->socket, &msg, sizeof(pkt));
-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
@@ -698,7 +698,7 @@ void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req
 			ret = 0;
 			trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags,
					    rxrpc_txdata_inject_loss);
-			conn->peer->last_tx_at = ktime_get_seconds();
+			rxrpc_peer_mark_tx(conn->peer);
 			goto done;
 		}
 	}
@@ -711,7 +711,7 @@ void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req
 	 */
 	rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
 	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);

 	if (ret == -EMSGSIZE) {
 		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_msgsize);
@@ -797,7 +797,7 @@ void rxrpc_send_conn_abort(struct rxrpc_connection *conn)

 	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);
 }

 /*
@@ -917,7 +917,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)

 	trace_rxrpc_tx_packet(peer->debug_id, &whdr,
			      rxrpc_tx_point_version_keepalive);
-	peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(peer);

 	_leave("");
 }
@@ -973,7 +973,7 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response
 	if (ret < 0)
 		goto fail;

-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);
 	return;

 fail:


@@ -237,6 +237,21 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
 	spin_unlock_irq(&peer->lock);
 }

+/*
+ * Reconstruct the last transmission time. The difference calculated should be
+ * valid provided no more than ~68 years elapsed since the last transmission.
+ */
+static time64_t rxrpc_peer_get_tx_mark(const struct rxrpc_peer *peer, time64_t base)
+{
+	s32 last_tx_at = READ_ONCE(peer->last_tx_at);
+	s32 base_lsw = base;
+	s32 diff = last_tx_at - base_lsw;
+
+	diff = clamp(diff, -RXRPC_KEEPALIVE_TIME, RXRPC_KEEPALIVE_TIME);
+	return diff + base;
+}
+
 /*
  * Perform keep-alive pings.
  */
@@ -265,7 +280,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		spin_unlock_bh(&rxnet->peer_hash_lock);

 		if (use) {
-			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+			keepalive_at = rxrpc_peer_get_tx_mark(peer, base) + RXRPC_KEEPALIVE_TIME;
 			slot = keepalive_at - base;
 			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);
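
The reconstruction trick above is worth spelling out: because only the low 32 bits of the time are stored, subtracting two LSWs in signed 32-bit arithmetic recovers the true delta as long as the two instants lie within 2^31 seconds (~68 years) of each other. A standalone sketch of the same idea (names hypothetical):

#include <stdint.h>
#include <stdio.h>

static int64_t reconstruct(uint32_t stored_lsw, int64_t base)
{
	/* wraparound-safe: subtract the LSWs, interpret as signed 32-bit */
	int32_t diff = (int32_t)(stored_lsw - (uint32_t)base);

	return base + diff;
}

int main(void)
{
	int64_t now = 0x100000005LL;		/* a time64_t beyond 32 bits */
	uint32_t mark = (uint32_t)(now - 30);	/* LSW stored 30 seconds ago */

	/* prints 30: the full-width delta survives the truncation */
	printf("%lld\n", (long long)(now - reconstruct(mark, now)));
	return 0;
}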


@@ -296,13 +296,13 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
 	now = ktime_get_seconds();
 	seq_printf(seq,
-		   "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n",
+		   "UDP %-47.47s %-47.47s %3u %4u %5u %6ds %8d %8d\n",
		   lbuff,
		   rbuff,
		   refcount_read(&peer->ref),
		   peer->cong_ssthresh,
		   peer->max_data,
-		   now - peer->last_tx_at,
+		   (s32)now - (s32)READ_ONCE(peer->last_tx_at),
		   READ_ONCE(peer->recent_srtt_us),
		   READ_ONCE(peer->recent_rto_us));


@@ -518,7 +518,8 @@ try_again:
 	if (rxrpc_call_has_failed(call))
 		goto call_failed;

-	if (!skb_queue_empty(&call->recvmsg_queue))
+	if (!(flags & MSG_PEEK) &&
+	    !skb_queue_empty(&call->recvmsg_queue))
 		rxrpc_notify_socket(call);
 	goto not_yet_complete;
@@ -549,11 +550,21 @@ error_unlock_call:
 error_requeue_call:
 	if (!(flags & MSG_PEEK)) {
 		spin_lock_irq(&rx->recvmsg_lock);
-		list_add(&call->recvmsg_link, &rx->recvmsg_q);
-		spin_unlock_irq(&rx->recvmsg_lock);
+		if (list_empty(&call->recvmsg_link)) {
+			list_add(&call->recvmsg_link, &rx->recvmsg_q);
+			rxrpc_see_call(call, rxrpc_call_see_recvmsg_requeue);
+			spin_unlock_irq(&rx->recvmsg_lock);
+		} else if (list_is_first(&call->recvmsg_link, &rx->recvmsg_q)) {
+			spin_unlock_irq(&rx->recvmsg_lock);
+			rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_first);
+		} else {
+			list_move(&call->recvmsg_link, &rx->recvmsg_q);
+			spin_unlock_irq(&rx->recvmsg_lock);
+			rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_move);
+		}
 		trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
 	} else {
-		rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+		rxrpc_put_call(call, rxrpc_call_put_recvmsg_peek_nowait);
 	}
 error_no_call:
 	release_sock(&rx->sk);


@@ -678,7 +678,7 @@ static int rxgk_issue_challenge(struct rxrpc_connection *conn)
 	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
 	if (ret > 0)
-		conn->peer->last_tx_at = ktime_get_seconds();
+		rxrpc_peer_mark_tx(conn->peer);
 	__free_page(page);

 	if (ret < 0) {


@@ -694,7 +694,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 		return -EAGAIN;
 	}

-	conn->peer->last_tx_at = ktime_get_seconds();
+	rxrpc_peer_mark_tx(conn->peer);
 	trace_rxrpc_tx_packet(conn->debug_id, &whdr,
			      rxrpc_tx_point_rxkad_challenge);
 	_leave(" = 0");


@@ -821,6 +821,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 	/* could be stupid policy setup or mtu config
	 * so lets be conservative.. */
 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
+drop:
 		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
 		return TC_ACT_SHOT;
 	}
@@ -829,6 +830,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 	skb_push(skb, skb->dev->hard_header_len);
 	ife_meta = ife_encode(skb, metalen);
+	if (!ife_meta)
+		goto drop;

 	spin_lock(&ife->tcf_lock);
@@ -844,8 +847,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 		if (err < 0) {
 			/* too corrupt to keep around if overwritten */
 			spin_unlock(&ife->tcf_lock);
-			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
-			return TC_ACT_SHOT;
+			goto drop;
 		}
 		skboff += err;
 	}


@@ -373,7 +373,7 @@ static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
 /* Deschedule class and remove it from its parent aggregate. */
 static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
 {
-	if (cl->qdisc->q.qlen > 0) /* class is active */
+	if (cl_is_active(cl)) /* class is active */
 		qfq_deactivate_class(q, cl);

 	qfq_rm_from_agg(q, cl);


@@ -178,6 +178,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 	if (m->dev == dev)
 		return -ELOOP;

+	if (sch->parent != TC_H_ROOT) {
+		NL_SET_ERR_MSG_MOD(extack, "teql can only be used as root");
+		return -EOPNOTSUPP;
+	}
+
 	q->m = m;

 	skb_queue_head_init(&q->q);


@@ -603,6 +603,11 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
 	sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
			SCTP_PEER_INIT(initchunk));

+	/* SCTP-AUTH: generate the association shared keys so that
+	 * we can potentially sign the COOKIE-ECHO.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
+
 	/* Reset init error count upon receipt of INIT-ACK. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
@@ -617,11 +622,6 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));

-	/* SCTP-AUTH: generate the association shared keys so that
-	 * we can potentially sign the COOKIE-ECHO.
-	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
-
 	/* 5.1 C) "A" shall then send the State Cookie received in the
	 * INIT ACK chunk in a COOKIE ECHO chunk, ...
	 */


@@ -28,6 +28,7 @@

 static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
					       bool cancel_timeout);
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs);

 static const struct virtio_transport *
 virtio_transport_get_ops(struct vsock_sock *vsk)
@@ -499,9 +500,7 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
 		return 0;

 	spin_lock_bh(&vvs->tx_lock);
-	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
-	if (ret > credit)
-		ret = credit;
+	ret = min_t(u32, credit, virtio_transport_has_space(vvs));
 	vvs->tx_cnt += ret;
 	vvs->bytes_unsent += ret;
 	spin_unlock_bh(&vvs->tx_lock);
@@ -822,6 +821,15 @@ virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
 }
 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

+static u32 virtio_transport_tx_buf_size(struct virtio_vsock_sock *vvs)
+{
+	/* The peer advertises its receive buffer via peer_buf_alloc, but we
+	 * cap it to our local buf_alloc so a remote peer cannot force us to
+	 * queue more data than our own buffer configuration allows.
+	 */
+	return min(vvs->peer_buf_alloc, vvs->buf_alloc);
+}
+
 int
 virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
@@ -831,7 +839,7 @@ virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
 	spin_lock_bh(&vvs->tx_lock);

-	if (len > vvs->peer_buf_alloc) {
+	if (len > virtio_transport_tx_buf_size(vvs)) {
 		spin_unlock_bh(&vvs->tx_lock);
 		return -EMSGSIZE;
 	}
@@ -877,12 +885,16 @@ u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
 }
 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

-static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs)
 {
-	struct virtio_vsock_sock *vvs = vsk->trans;
 	s64 bytes;

-	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+	/* Use s64 arithmetic so if the peer shrinks peer_buf_alloc while
+	 * we have bytes in flight (tx_cnt - peer_fwd_cnt), the subtraction
+	 * does not underflow.
+	 */
+	bytes = (s64)virtio_transport_tx_buf_size(vvs) -
+		(vvs->tx_cnt - vvs->peer_fwd_cnt);
 	if (bytes < 0)
 		bytes = 0;
@@ -895,7 +907,7 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
 	s64 bytes;

 	spin_lock_bh(&vvs->tx_lock);
-	bytes = virtio_transport_has_space(vsk);
+	bytes = virtio_transport_has_space(vvs);
 	spin_unlock_bh(&vvs->tx_lock);

 	return bytes;
@@ -1359,9 +1371,11 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
-	 * payload.
+	 * payload. Skip non-linear (e.g. zerocopy) skbs; these carry payload
+	 * in skb_shinfo.
	 */
-	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
+	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue) &&
+	    !skb_is_nonlinear(skb)) {
 		struct virtio_vsock_hdr *last_hdr;
 		struct sk_buff *last_skb;
@@ -1490,7 +1504,7 @@ static bool virtio_transport_space_update(struct sock *sk,
 	spin_lock_bh(&vvs->tx_lock);
 	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
 	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
-	space_available = virtio_transport_has_space(vsk);
+	space_available = virtio_transport_has_space(vvs);
 	spin_unlock_bh(&vvs->tx_lock);
 	return space_available;
 }
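
The two vsock changes combine into one invariant: credit is always computed against min(peer_buf_alloc, buf_alloc), in signed 64-bit arithmetic. A standalone sketch showing why both parts matter (values invented, function names mirror the driver but this is illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t tx_buf_size(uint32_t peer_buf, uint32_t local_buf)
{
	/* never trust the peer beyond our own configured buffer */
	return peer_buf < local_buf ? peer_buf : local_buf;
}

static int64_t has_space(uint32_t peer_buf, uint32_t local_buf,
			 uint32_t tx_cnt, uint32_t fwd_cnt)
{
	/* s64 keeps the subtraction from underflowing when the peer
	 * shrinks its window while bytes are still in flight */
	int64_t bytes = (int64_t)tx_buf_size(peer_buf, local_buf) -
			(uint32_t)(tx_cnt - fwd_cnt);

	return bytes < 0 ? 0 : bytes;
}

int main(void)
{
	/* peer shrank its window to 4 KiB with 16 KiB unacknowledged:
	 * u32 math would wrap to a huge bogus credit, s64 math yields 0 */
	printf("%lld\n", (long long)has_space(4096, 65536, 16384, 0));
	return 0;
}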


@@ -12241,9 +12241,6 @@ static int nl80211_process_links(struct cfg80211_registered_device *rdev,
				return -EINVAL;
 			}
 		}
-
-		links[link_id].disabled =
-			nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
 	}

 	return 0;
@@ -12423,13 +12420,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 			goto free;
 		}

-		if (req.links[req.link_id].disabled) {
-			GENL_SET_ERR_MSG(info,
-					 "cannot have assoc link disabled");
-			err = -EINVAL;
-			goto free;
-		}
-
 		if (info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS])
 			req.ext_mld_capa_ops =
				nla_get_u16(info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]);


@@ -1561,12 +1561,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
 	tmp = result;
 	tmp *= SCALE;
 	do_div(tmp, mcs_divisors[rate->mcs]);
-	result = tmp;

 	/* and take NSS, DCM into account */
-	result = (result * rate->nss) / 8;
+	tmp *= rate->nss;
+	do_div(tmp, 8);
 	if (rate->he_dcm)
-		result /= 2;
+		do_div(tmp, 2);
+	result = tmp;

 	return result / 10000;
 }
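
The rewrite keeps every intermediate product in the 64-bit tmp and divides via do_div() rather than round-tripping through the 32-bit result, where the multiply by nss can wrap. A standalone illustration of the wrap (the rate value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 600500000U;	/* a scaled per-stream rate */

	uint32_t narrow = result * 8 / 8;		/* product wraps past 2^32 */
	uint64_t wide = (uint64_t)result * 8 / 8;	/* stays exact in 64 bits */

	printf("32-bit: %u, 64-bit: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}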


@@ -41,7 +41,7 @@ clean distclean:
	rm -rf pyynl.egg-info
	rm -rf build

-install: libynl.a lib/*.h
+install: libynl.a lib/*.h ynltool
	@echo -e "\tINSTALL libynl.a"
	@$(INSTALL) -d $(DESTDIR)$(libdir)
	@$(INSTALL) -m 0644 libynl.a $(DESTDIR)$(libdir)/libynl.a
@@ -51,6 +51,7 @@ install: libynl.a lib/*.h
	@echo -e "\tINSTALL pyynl"
	@pip install --prefix=$(DESTDIR)$(prefix) .
	@make -C generated install
+	@make -C ynltool install

 run_tests:
	@$(MAKE) -C tests run_tests


@@ -21,7 +21,7 @@ files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\|user\)')
 for f in $files; do
	# params: 0     1       2      3
	#         $YAML YNL-GEN kernel $mode
-	params=( $(git grep -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
+	params=( $(git grep --no-line-number -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
	args=$(sed -n 's@/\* YNL-ARG \(.*\) \*/@\1@p' $f)

	if [ $f -nt ${params[0]} -a -z "$force" ]; then


@@ -48,6 +48,7 @@ TEST_PROGS := \
	ipv6_flowlabel.sh \
	ipv6_force_forwarding.sh \
	ipv6_route_update_soft_lockup.sh \
+	ipvtap_test.sh \
	l2_tos_ttl_inherit.sh \
	l2tp.sh \
	link_netns.py \


@@ -73,6 +73,8 @@
 # +------------------------+
 #==============================================================================

+source lib.sh
+
 readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
 readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
 readonly RELAY=$(mktemp -u relay-XXXXXXXX)
@@ -246,14 +248,15 @@ test_ipv6_forward()
 send_mcast4()
 {
-	sleep 2
+	sleep 5
+	wait_local_port_listen ${LISTENER} 4000 udp
	ip netns exec "${SOURCE}" bash -c \
		'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
 }

 send_mcast6()
 {
-	sleep 2
+	wait_local_port_listen ${LISTENER} 6000 udp
	ip netns exec "${SOURCE}" bash -c \
		'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
 }

Some files were not shown because too many files have changed in this diff.