Unverified commit 0c2046e0, authored by openeuler-ci-bot, committed by Gitee

!389 [sync] PR-385: Backport CVEs and bugfixes

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/385 
 
Pull new CVEs:
CVE-2022-3707
CVE-2023-0394

cgroup bugfixes from Cai Xinchen
net bugfixes from Zhengchao Shao and Wang Yufen
fs bugfixes from Baokun Li, Li Lingfeng and Li Nan
scsi bugfix from Zhong Jinghua
mm bugfixes from Liu Shixin
irqchip/gic-v4 bugfix from Zenghui Yu
 
 
Link: https://gitee.com/openeuler/kernel/pulls/389

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -1192,10 +1192,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
     for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
         ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
                 start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-        if (ret) {
-            ppgtt_invalidate_spt(spt);
-            return ret;
-        }
+        if (ret)
+            goto err;
         sub_se.val64 = se->val64;

         /* Copy the PAT field from PDE. */
@@ -1214,6 +1212,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
     ops->set_pfn(se, sub_spt->shadow_page.mfn);
     ppgtt_set_shadow_entry(spt, se, index);
     return 0;
+err:
+    /* Cancel the existing addess mappings of DMA addr. */
+    for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+        gvt_vdbg_mm("invalidate 4K entry\n");
+        ppgtt_invalidate_pte(sub_spt, &sub_se);
+    }
+    /* Release the new allocated spt. */
+    trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+        sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+    ppgtt_free_spt(sub_spt);
+    return ret;
 }

 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
......
@@ -3061,18 +3061,12 @@ static int __init allocate_lpi_tables(void)
     return 0;
 }

-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
 {
     u32 count = 1000000;    /* 1s! */
     bool clean;
     u64 val;

-    val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-    val &= ~GICR_VPENDBASER_Valid;
-    val &= ~clr;
-    val |= set;
-    gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
     do {
         val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
         clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3083,10 +3077,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
         }
     } while (!clean && count);

-    if (unlikely(val & GICR_VPENDBASER_Dirty)) {
-        pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+    if (unlikely(!clean))
+        pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+    return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+    u64 val;
+
+    /* Make sure we wait until the RD is done with the initial scan */
+    val = read_vpend_dirty_clear(vlpi_base);
+    val &= ~GICR_VPENDBASER_Valid;
+    val &= ~clr;
+    val |= set;
+    gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+    val = read_vpend_dirty_clear(vlpi_base);
+    if (unlikely(val & GICR_VPENDBASER_Dirty))
         val |= GICR_VPENDBASER_PendingLast;
-    }

     return val;
 }
......
@@ -1920,7 +1920,7 @@ static void gmac_get_stats64(struct net_device *netdev,
     /* Racing with RX NAPI */
     do {
-        start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);

         stats->rx_packets = port->stats.rx_packets;
         stats->rx_bytes = port->stats.rx_bytes;
@@ -1932,11 +1932,11 @@ static void gmac_get_stats64(struct net_device *netdev,
         stats->rx_crc_errors = port->stats.rx_crc_errors;
         stats->rx_frame_errors = port->stats.rx_frame_errors;

-    } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));

     /* Racing with MIB and TX completion interrupts */
     do {
-        start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);

         stats->tx_errors = port->stats.tx_errors;
         stats->tx_packets = port->stats.tx_packets;
@@ -1946,15 +1946,15 @@ static void gmac_get_stats64(struct net_device *netdev,
         stats->rx_missed_errors = port->stats.rx_missed_errors;
         stats->rx_fifo_errors = port->stats.rx_fifo_errors;

-    } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));

     /* Racing with hard_start_xmit */
     do {
-        start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);

         stats->tx_dropped = port->stats.tx_dropped;

-    } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));

     stats->rx_dropped += stats->rx_missed_errors;
 }
@@ -2032,18 +2032,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
     /* Racing with MIB interrupt */
     do {
         p = values;
-        start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);

         for (i = 0; i < RX_STATS_NUM; i++)
             *p++ = port->hw_stats[i];

-    } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
     values = p;

     /* Racing with RX NAPI */
     do {
         p = values;
-        start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);

         for (i = 0; i < RX_STATUS_NUM; i++)
             *p++ = port->rx_stats[i];
@@ -2051,13 +2051,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
             *p++ = port->rx_csum_stats[i];
         *p++ = port->rx_napi_exits;

-    } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
     values = p;

     /* Racing with TX start_xmit */
     do {
         p = values;
-        start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+        start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);

         for (i = 0; i < TX_MAX_FRAGS; i++) {
             *values++ = port->tx_frag_stats[i];
@@ -2066,7 +2066,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
     *values++ = port->tx_frags_linearized;
     *values++ = port->tx_hw_csummed;

-    } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+    } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
 }

 static int gmac_get_ksettings(struct net_device *netdev,
......
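The driver hunks in this series (gemini above, plus gve, hinic, nfp, netdevsim, mac80211 and mpls below) all make the same mechanical change: statistics readers switch from u64_stats_fetch_begin()/u64_stats_fetch_retry() to the _irq variants, because the matching writers can run from (soft)IRQ context. A minimal sketch of the reader/writer pairing follows; the demo_stats structure and function names are illustrative only and not part of the patch.

struct demo_stats {
    u64                     packets;
    u64                     bytes;
    struct u64_stats_sync   syncp;  /* from <linux/u64_stats_sync.h> */
};

/* Writer side, e.g. a NAPI poll loop or a completion interrupt. */
static void demo_stats_add(struct demo_stats *s, u64 len)
{
    u64_stats_update_begin(&s->syncp);
    s->packets++;
    s->bytes += len;
    u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. The _irq
 * variants are what the hunks convert to, matching writers that may
 * interrupt the reader on the same CPU.
 */
static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin_irq(&s->syncp);
        *packets = s->packets;
        *bytes = s->bytes;
    } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}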
@@ -172,14 +172,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
             struct gve_rx_ring *rx = &priv->rx[ring];

             start =
-                u64_stats_fetch_begin(&priv->rx[ring].statss);
+                u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
             tmp_rx_pkts = rx->rpackets;
             tmp_rx_bytes = rx->rbytes;
             tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
             tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
             tmp_rx_desc_err_dropped_pkt =
                 rx->rx_desc_err_dropped_pkt;
-        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+        } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
                            start));
         rx_pkts += tmp_rx_pkts;
         rx_bytes += tmp_rx_bytes;
@@ -193,10 +193,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
         if (priv->tx) {
             do {
                 start =
-                    u64_stats_fetch_begin(&priv->tx[ring].statss);
+                    u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
                 tmp_tx_pkts = priv->tx[ring].pkt_done;
                 tmp_tx_bytes = priv->tx[ring].bytes_done;
-            } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+            } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
                                start));
             tx_pkts += tmp_tx_pkts;
             tx_bytes += tmp_tx_bytes;
@@ -254,13 +254,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
         data[i++] = rx->cnt;
         do {
             start =
-                u64_stats_fetch_begin(&priv->rx[ring].statss);
+                u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
             tmp_rx_bytes = rx->rbytes;
             tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
             tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
             tmp_rx_desc_err_dropped_pkt =
                 rx->rx_desc_err_dropped_pkt;
-        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+        } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
                            start));
         data[i++] = tmp_rx_bytes;
         /* rx dropped packets */
@@ -313,9 +313,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
         data[i++] = tx->done;
         do {
             start =
-                u64_stats_fetch_begin(&priv->tx[ring].statss);
+                u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
             tmp_tx_bytes = tx->bytes_done;
-        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+        } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
                            start));
         data[i++] = tmp_tx_bytes;
         data[i++] = tx->wake_queue;
......
@@ -40,10 +40,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
         for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
             do {
                 start =
-                    u64_stats_fetch_begin(&priv->rx[ring].statss);
+                    u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
                 packets = priv->rx[ring].rpackets;
                 bytes = priv->rx[ring].rbytes;
-            } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+            } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
                                start));
             s->rx_packets += packets;
             s->rx_bytes += bytes;
@@ -53,10 +53,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
         for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
             do {
                 start =
-                    u64_stats_fetch_begin(&priv->tx[ring].statss);
+                    u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
                 packets = priv->tx[ring].pkt_done;
                 bytes = priv->tx[ring].bytes_done;
-            } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+            } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
                                start));
             s->tx_packets += packets;
             s->tx_bytes += bytes;
@@ -1041,9 +1041,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
     if (priv->tx) {
         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
             do {
-                start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+                start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
                 tx_bytes = priv->tx[idx].bytes_done;
-            } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+            } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
             stats[stats_idx++] = (struct stats) {
                 .stat_name = cpu_to_be32(TX_WAKE_CNT),
                 .value = cpu_to_be64(priv->tx[idx].wake_queue),
......
@@ -375,7 +375,7 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq,
     u64_stats_update_begin(&stats->syncp);
     do {
-        start = u64_stats_fetch_begin(&rxq_stats->syncp);
+        start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
         stats->bytes = rxq_stats->bytes;
         stats->packets = rxq_stats->packets;
         stats->errors = rxq_stats->csum_errors +
@@ -384,7 +384,7 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq,
         stats->other_errors = rxq_stats->other_errors;
         stats->dropped = rxq_stats->dropped;
         stats->rx_buf_empty = rxq_stats->rx_buf_empty;
-    } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+    } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
     u64_stats_update_end(&stats->syncp);
 }
......
@@ -61,7 +61,7 @@ void hinic_txq_get_stats(struct hinic_txq *txq,
     u64_stats_update_begin(&stats->syncp);
     do {
-        start = u64_stats_fetch_begin(&txq_stats->syncp);
+        start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
         stats->bytes = txq_stats->bytes;
         stats->packets = txq_stats->packets;
         stats->busy = txq_stats->busy;
@@ -69,7 +69,7 @@ void hinic_txq_get_stats(struct hinic_txq *txq,
         stats->dropped = txq_stats->dropped;
         stats->big_frags_pkts = txq_stats->big_frags_pkts;
         stats->big_udp_pkts = txq_stats->big_udp_pkts;
-    } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+    } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
     u64_stats_update_end(&stats->syncp);
 }
......
@@ -3373,21 +3373,21 @@ static void nfp_net_stat64(struct net_device *netdev,
         unsigned int start;

         do {
-            start = u64_stats_fetch_begin(&r_vec->rx_sync);
+            start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
             data[0] = r_vec->rx_pkts;
             data[1] = r_vec->rx_bytes;
             data[2] = r_vec->rx_drops;
-        } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+        } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
         stats->rx_packets += data[0];
         stats->rx_bytes += data[1];
         stats->rx_dropped += data[2];

         do {
-            start = u64_stats_fetch_begin(&r_vec->tx_sync);
+            start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
             data[0] = r_vec->tx_pkts;
             data[1] = r_vec->tx_bytes;
             data[2] = r_vec->tx_errors;
-        } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+        } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
         stats->tx_packets += data[0];
         stats->tx_bytes += data[1];
         stats->tx_errors += data[2];
......
@@ -498,7 +498,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
         unsigned int start;

         do {
-            start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+            start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
             data[0] = nn->r_vecs[i].rx_pkts;
             tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
             tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -506,10 +506,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
             tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
             tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
             tmp[5] = nn->r_vecs[i].hw_tls_rx;
-        } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+        } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));

         do {
-            start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+            start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
             data[1] = nn->r_vecs[i].tx_pkts;
             data[2] = nn->r_vecs[i].tx_busy;
             tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -519,7 +519,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
             tmp[10] = nn->r_vecs[i].hw_tls_tx;
             tmp[11] = nn->r_vecs[i].tls_tx_fallback;
             tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
-        } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+        } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));

         data += NN_RVEC_PER_Q_STATS;
......
@@ -1176,7 +1176,7 @@ void macvlan_common_setup(struct net_device *dev)
 {
     ether_setup(dev);

-    dev->min_mtu        = 0;
+    /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
     dev->max_mtu        = ETH_MAX_MTU;
     dev->priv_flags    &= ~IFF_TX_SKB_SHARING;
     netif_keep_dst(dev);
......
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
     unsigned int start;

     do {
-        start = u64_stats_fetch_begin(&ns->syncp);
+        start = u64_stats_fetch_begin_irq(&ns->syncp);
         stats->tx_bytes = ns->tx_bytes;
         stats->tx_packets = ns->tx_packets;
-    } while (u64_stats_fetch_retry(&ns->syncp, start));
+    } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
 }

 static int
......
@@ -1503,6 +1503,13 @@ void scsi_remove_device(struct scsi_device *sdev)
 }
 EXPORT_SYMBOL(scsi_remove_device);

+static int scsi_device_try_get(struct scsi_device *sdev)
+{
+    if (!kobject_get_unless_zero(&sdev->sdev_gendev.kobj))
+        return -ENXIO;
+
+    return 0;
+}
+
 static void __scsi_remove_target(struct scsi_target *starget)
 {
     struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -1521,9 +1528,7 @@ static void __scsi_remove_target(struct scsi_target *starget)
         if (sdev->channel != starget->channel ||
             sdev->id != starget->id)
             continue;
-        if (sdev->sdev_state == SDEV_DEL ||
-            sdev->sdev_state == SDEV_CANCEL ||
-            !get_device(&sdev->sdev_gendev))
+        if (scsi_device_try_get(sdev))
             continue;
         spin_unlock_irqrestore(shost->host_lock, flags);
         scsi_remove_device(sdev);
......
@@ -1264,6 +1264,9 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
     struct bd_holder_disk *holder;
     int ret = 0;

+    if (bdev->bd_disk == disk)
+        return -EINVAL;
+
     /*
      * bdev could be deleted beneath us which would implicitly destroy
      * the holder directory. Hold on to it.
......
@@ -4224,7 +4224,8 @@ int ext4_truncate(struct inode *inode)
     /* If we zero-out tail of the page, we have to create jinode for jbd2 */
     if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
-        if (ext4_inode_attach_jinode(inode) < 0)
+        err = ext4_inode_attach_jinode(inode);
+        if (err)
             goto out_trace;
     }
......
@@ -6371,7 +6371,7 @@ static int ext4_write_info(struct super_block *sb, int type)
     handle_t *handle;

     /* Data block + inode block */
-    handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
+    handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
     if (IS_ERR(handle))
         return PTR_ERR(handle);
     ret = dquot_commit_info(sb, type);
......
@@ -472,10 +472,11 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
     struct vm_area_struct *vma;

     /*
-     * end == 0 indicates that the entire range after
-     * start should be unmapped.
+     * end == 0 indicates that the entire range after start should be
+     * unmapped.  Note, end is exclusive, whereas the interval tree takes
+     * an inclusive "last".
      */
-    vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
+    vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
         unsigned long v_offset;
         unsigned long v_end;
......
@@ -40,6 +40,7 @@ STATIC void
 xfs_bui_item_free(
     struct xfs_bui_log_item *buip)
 {
+    kmem_free(buip->bui_item.li_lv_shadow);
     kmem_cache_free(xfs_bui_zone, buip);
 }

@@ -199,6 +200,7 @@ xfs_bud_item_release(
     struct xfs_bud_log_item *budp = BUD_ITEM(lip);

     xfs_bui_release(budp->bud_buip);
+    kmem_free(budp->bud_item.li_lv_shadow);
     kmem_cache_free(xfs_bud_zone, budp);
 }
......
@@ -63,6 +63,7 @@ STATIC void
 xfs_icreate_item_release(
     struct xfs_log_item *lip)
 {
+    kmem_free(ICR_ITEM(lip)->ic_item.li_lv_shadow);
     kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip));
 }
......
@@ -35,6 +35,7 @@ STATIC void
 xfs_cui_item_free(
     struct xfs_cui_log_item *cuip)
 {
+    kmem_free(cuip->cui_item.li_lv_shadow);
     if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
         kmem_free(cuip);
     else
@@ -204,6 +205,7 @@ xfs_cud_item_release(
     struct xfs_cud_log_item *cudp = CUD_ITEM(lip);

     xfs_cui_release(cudp->cud_cuip);
+    kmem_free(cudp->cud_item.li_lv_shadow);
     kmem_cache_free(xfs_cud_zone, cudp);
 }
......
@@ -35,6 +35,7 @@ STATIC void
 xfs_rui_item_free(
     struct xfs_rui_log_item *ruip)
 {
+    kmem_free(ruip->rui_item.li_lv_shadow);
     if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
         kmem_free(ruip);
     else
@@ -227,6 +228,7 @@ xfs_rud_item_release(
     struct xfs_rud_log_item *rudp = RUD_ITEM(lip);

     xfs_rui_release(rudp->rud_ruip);
+    kmem_free(rudp->rud_item.li_lv_shadow);
     kmem_cache_free(xfs_rud_zone, rudp);
 }
......
@@ -6,11 +6,13 @@
 #include <linux/seq_file.h>

 #ifdef CONFIG_MEMCG_MEMFS_INFO
-void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m);
+void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, char *pathbuf,
+                 struct seq_file *m);
 int mem_cgroup_memfs_files_show(struct seq_file *m, void *v);
 void mem_cgroup_memfs_info_init(void);
 #else
 static inline void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg,
+                           char *pathbuf,
                            struct seq_file *m)
 {
 }
......
@@ -706,6 +706,7 @@ typedef unsigned char *sk_buff_data_t;
  *    @transport_header: Transport layer header
  *    @network_header: Network layer header
  *    @mac_header: Link layer header
+ *    @kcov_handle: KCOV remote handle for remote coverage collection
  *    @scm_io_uring: SKB holds io_uring registered files
  *    @tail: Tail pointer
  *    @end: End pointer
@@ -913,6 +914,10 @@ struct sk_buff {
     __u16           network_header;
     __u16           mac_header;

+#ifdef CONFIG_KCOV
+    u64             kcov_handle;
+#endif
+
     /* private: */
     __u32           headers_end[0];
     /* public: */
@@ -4223,9 +4228,6 @@ enum skb_ext_id {
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
     SKB_EXT_MPTCP,
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-    SKB_EXT_KCOV_HANDLE,
 #endif
     SKB_EXT_NUM, /* must be last */
 };
@@ -4681,35 +4683,22 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
 #endif
 }

-#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS)
 static inline void skb_set_kcov_handle(struct sk_buff *skb,
                        const u64 kcov_handle)
 {
-    /* Do not allocate skb extensions only to set kcov_handle to zero
-     * (as it is zero by default). However, if the extensions are
-     * already allocated, update kcov_handle anyway since
-     * skb_set_kcov_handle can be called to zero a previously set
-     * value.
-     */
-    if (skb_has_extensions(skb) || kcov_handle) {
-        u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE);
-
-        if (kcov_handle_ptr)
-            *kcov_handle_ptr = kcov_handle;
-    }
+#ifdef CONFIG_KCOV
+    skb->kcov_handle = kcov_handle;
+#endif
 }

 static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 {
-    u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE);
-
-    return kcov_handle ? *kcov_handle : 0;
-}
+#ifdef CONFIG_KCOV
+    return skb->kcov_handle;
 #else
-static inline void skb_set_kcov_handle(struct sk_buff *skb,
-                       const u64 kcov_handle) { }
-static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
-#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */
+    return 0;
+#endif
+}

 static inline bool skb_csum_is_sctp(struct sk_buff *skb)
 {
......
@@ -937,7 +937,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);

 #endif
......
@@ -176,6 +176,7 @@ INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                 struct udphdr *uh, struct sock *sk);
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+void udp_v6_early_demux(struct sk_buff *skb);

 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
                   netdev_features_t features, bool is_ipv6);
......
@@ -57,6 +57,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
     int retval = 0;

     mutex_lock(&cgroup_mutex);
+    cpus_read_lock();
     percpu_down_write(&cgroup_threadgroup_rwsem);
     for_each_root(root) {
         struct cgroup *from_cgrp;
@@ -73,6 +74,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
             break;
     }
     percpu_up_write(&cgroup_threadgroup_rwsem);
+    cpus_read_unlock();
     mutex_unlock(&cgroup_mutex);

     return retval;
......
@@ -2330,6 +2330,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);

+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+    cpus_read_lock();
+    if (lock_threadgroup)
+        percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+    if (lock_threadgroup)
+        percpu_up_write(&cgroup_threadgroup_rwsem);
+    cpus_read_unlock();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2804,8 +2845,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }

 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-                         bool *locked)
-    __acquires(&cgroup_threadgroup_rwsem)
+                         bool *threadgroup_locked)
 {
     struct task_struct *tsk;
     pid_t pid;
@@ -2822,12 +2862,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
      * Therefore, we can skip the global lock.
      */
     lockdep_assert_held(&cgroup_mutex);
-    if (pid || threadgroup) {
-        percpu_down_write(&cgroup_threadgroup_rwsem);
-        *locked = true;
-    } else {
-        *locked = false;
-    }
+    *threadgroup_locked = pid || threadgroup;
+    cgroup_attach_lock(*threadgroup_locked);

     rcu_read_lock();
     if (pid) {
@@ -2858,17 +2894,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
     goto out_unlock_rcu;

 out_unlock_threadgroup:
-    if (*locked) {
-        percpu_up_write(&cgroup_threadgroup_rwsem);
-        *locked = false;
-    }
+    cgroup_attach_unlock(*threadgroup_locked);
+    *threadgroup_locked = false;
 out_unlock_rcu:
     rcu_read_unlock();
     return tsk;
 }

-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
-    __releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
 {
     struct cgroup_subsys *ss;
     int ssid;
@@ -2876,8 +2909,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
     /* release reference from cgroup_procs_write_start() */
     put_task_struct(task);

-    if (locked)
-        percpu_up_write(&cgroup_threadgroup_rwsem);
+    cgroup_attach_unlock(threadgroup_locked);
+
     for_each_subsys(ss, ssid)
         if (ss->post_attach)
             ss->post_attach();
@@ -2954,12 +2987,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
     struct cgroup_subsys_state *d_css;
     struct cgroup *dsct;
     struct css_set *src_cset;
+    bool has_tasks;
     int ret;

     lockdep_assert_held(&cgroup_mutex);

-    percpu_down_write(&cgroup_threadgroup_rwsem);
-
     /* look up all csses currently attached to @cgrp's subtree */
     spin_lock_irq(&css_set_lock);
     cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
@@ -2970,6 +3002,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
     }
     spin_unlock_irq(&css_set_lock);

+    /*
+     * We need to write-lock threadgroup_rwsem while migrating tasks.
+     * However, if there are no source csets for @cgrp, changing its
+     * controllers isn't gonna produce any task migrations and the
+     * write-locking can be skipped safely.
+     */
+    has_tasks = !list_empty(&mgctx.preloaded_src_csets);
+    cgroup_attach_lock(has_tasks);
+
     /* NULL dst indicates self on default hierarchy */
     ret = cgroup_migrate_prepare_dst(&mgctx);
     if (ret)
@@ -2988,7 +3029,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
     ret = cgroup_migrate_execute(&mgctx);
 out_finish:
     cgroup_migrate_finish(&mgctx);
-    percpu_up_write(&cgroup_threadgroup_rwsem);
+    cgroup_attach_unlock(has_tasks);
     return ret;
 }

@@ -4950,13 +4991,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
     struct task_struct *task;
     const struct cred *saved_cred;
     ssize_t ret;
-    bool locked;
+    bool threadgroup_locked;

     dst_cgrp = cgroup_kn_lock_live(of->kn, false);
     if (!dst_cgrp)
         return -ENODEV;

-    task = cgroup_procs_write_start(buf, threadgroup, &locked);
+    task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
     ret = PTR_ERR_OR_ZERO(task);
     if (ret)
         goto out_unlock;
@@ -4982,7 +5023,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
     ret = cgroup_attach_task(dst_cgrp, task, threadgroup);

 out_finish:
-    cgroup_procs_write_finish(task, locked);
+    cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
     cgroup_kn_unlock(of->kn);
......
@@ -2220,7 +2220,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
     cgroup_taskset_first(tset, &css);
     cs = css_cs(css);

-    cpus_read_lock();
+    lockdep_assert_cpus_held();    /* see cgroup_attach_lock() */
     percpu_down_write(&cpuset_rwsem);

     /* prepare for attach */
@@ -2276,7 +2276,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
         wake_up(&cpuset_attach_wq);

     percpu_up_write(&cpuset_rwsem);
-    cpus_read_unlock();
 }
......
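The cgroup hunks above all enforce one locking rule, spelled out in the new cgroup_attach_lock() comment: cpus_read_lock() is taken before cgroup_threadgroup_rwsem, so an ->attach() handler such as cpuset_attach() can rely on CPU hotplug already being disabled instead of taking that lock itself in the reverse order. A condensed, illustrative sketch of the resulting nesting follows; the demo_* names are hypothetical and the real helpers are the ones added in the cgroup.c hunk.

/* Correct nesting after this series: the hotplug lock is outermost. */
static void demo_attach_lock(bool lock_threadgroup)
{
    cpus_read_lock();                                      /* outer lock */
    if (lock_threadgroup)
        percpu_down_write(&cgroup_threadgroup_rwsem);      /* inner lock */
}

static void demo_attach_unlock(bool lock_threadgroup)
{
    if (lock_threadgroup)
        percpu_up_write(&cgroup_threadgroup_rwsem);
    cpus_read_unlock();
}

/* An ->attach() callback now only asserts the outer lock is held. */
static void demo_attach(void)
{
    lockdep_assert_cpus_held();    /* previously cpus_read_lock() here */
    /* ... migrate tasks ... */
}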
@@ -1945,7 +1945,6 @@ config KCOV
     depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
     select DEBUG_FS
     select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
-    select SKB_EXTENSIONS if NET
     help
       KCOV exposes kernel code coverage information in a form suitable
       for coverage-guided fuzzing (randomized testing).
......
@@ -157,7 +157,8 @@ static void memfs_show_files_in_mem_cgroup(struct super_block *sb, void *data)
     mntput(pfc->vfsmnt);
 }

-void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
+void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, char *pathbuf,
+                 struct seq_file *m)
 {
     struct print_files_control pfc = {
         .memcg = memcg,
@@ -165,17 +166,11 @@ void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
         .max_print_files = memfs_max_print_files,
         .size_threshold = memfs_size_threshold,
     };
-    char *pathbuf;
     int i;

     if (!memfs_enable || !memcg)
         return;

-    pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
-    if (!pathbuf) {
-        SEQ_printf(m, "Show memfs failed due to OOM\n");
-        return;
-    }
     pfc.pathbuf = pathbuf;
     pfc.pathbuf_size = PATH_MAX;
@@ -192,15 +187,20 @@ void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
         SEQ_printf(m, "total files: %lu, total memory-size: %lukB\n",
                pfc.total_print_files, pfc.total_files_size >> 10);
     }
-
-    kfree(pfc.pathbuf);
 }

 int mem_cgroup_memfs_files_show(struct seq_file *m, void *v)
 {
     struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+    char *pathbuf;

-    mem_cgroup_print_memfs_info(memcg, m);
+    pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+    if (!pathbuf) {
+        SEQ_printf(m, "Show memfs abort: failed to allocate memory\n");
+        return 0;
+    }
+    mem_cgroup_print_memfs_info(memcg, pathbuf, m);
+    kfree(pathbuf);
     return 0;
 }
......
@@ -1509,14 +1509,12 @@ static int __init memory_stats_init(void)
 }
 pure_initcall(memory_stats_init);

-static char *memory_stat_format(struct mem_cgroup *memcg)
+static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
 {
     struct seq_buf s;
     int i;

-    seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
-    if (!s.buffer)
-        return NULL;
+    seq_buf_init(&s, buf, bufsize);

     /*
      * Provide statistics on the state of the memory subsystem as
@@ -1576,8 +1574,6 @@ static char *memory_stat_format(struct mem_cgroup *memcg)

     /* The above should easily fit into one page */
     WARN_ON_ONCE(seq_buf_has_overflowed(&s));
-
-    return s.buffer;
 }

 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -1613,7 +1609,11 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *
  */
 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
-    char *buf;
+    /* Use static buffer, for the caller is holding oom_lock. */
+    static char buf[PAGE_SIZE];
+    static char pathbuf[PATH_MAX];
+
+    lockdep_assert_held(&oom_lock);

     pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
         K((u64)page_counter_read(&memcg->memory)),
@@ -1634,13 +1634,10 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
     pr_info("Memory cgroup stats for ");
     pr_cont_cgroup_path(memcg->css.cgroup);
     pr_cont(":");
-    buf = memory_stat_format(memcg);
-    if (!buf)
-        return;
+    memory_stat_format(memcg, buf, sizeof(buf));
     pr_info("%s", buf);
-    kfree(buf);

-    mem_cgroup_print_memfs_info(memcg, NULL);
+    mem_cgroup_print_memfs_info(memcg, pathbuf, NULL);
 }

 /*
@@ -6912,11 +6909,11 @@ static int memory_events_local_show(struct seq_file *m, void *v)
 static int memory_stat_show(struct seq_file *m, void *v)
 {
     struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-    char *buf;
+    char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

-    buf = memory_stat_format(memcg);
     if (!buf)
         return -ENOMEM;
+    memory_stat_format(memcg, buf, PAGE_SIZE);
     seq_puts(m, buf);
     kfree(buf);
     return 0;
......
@@ -4334,9 +4334,6 @@ static const u8 skb_ext_type_len[] = {
 #if IS_ENABLED(CONFIG_MPTCP)
     [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
 #endif
-#if IS_ENABLED(CONFIG_KCOV)
-    [SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
-#endif
 };

 static __always_inline unsigned int skb_ext_total_length(void)
@@ -4353,9 +4350,6 @@ static __always_inline unsigned int skb_ext_total_length(void)
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
         skb_ext_type_len[SKB_EXT_MPTCP] +
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-        skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
 #endif
         0;
 }
......
@@ -1730,12 +1730,7 @@ static const struct net_protocol igmp_protocol = {
 };
 #endif

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol tcp_protocol = {
-    .early_demux         = tcp_v4_early_demux,
-    .early_demux_handler = tcp_v4_early_demux,
+static const struct net_protocol tcp_protocol = {
     .handler     = tcp_v4_rcv,
     .err_handler = tcp_v4_err,
     .no_policy   = 1,
@@ -1743,12 +1738,7 @@ static struct net_protocol tcp_protocol = {
     .icmp_strict_tag_validation = 1,
 };

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol udp_protocol = {
-    .early_demux         = udp_v4_early_demux,
-    .early_demux_handler = udp_v4_early_demux,
+static const struct net_protocol udp_protocol = {
     .handler     = udp_rcv,
     .err_handler = udp_err,
     .no_policy   = 1,
......
@@ -309,14 +309,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
            ip_hdr(hint)->tos == iph->tos;
 }

-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
+int tcp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                   struct sk_buff *skb, struct net_device *dev,
                   const struct sk_buff *hint)
 {
     const struct iphdr *iph = ip_hdr(skb);
-    int (*edemux)(struct sk_buff *skb);
     struct rtable *rt;
     int err;

@@ -327,21 +326,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
             goto drop_error;
     }

-    if (net->ipv4.sysctl_ip_early_demux &&
+    if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
         !skb_dst(skb) &&
         !skb->sk &&
         !ip_is_fragment(iph)) {
-        const struct net_protocol *ipprot;
-        int protocol = iph->protocol;
-
-        ipprot = rcu_dereference(inet_protos[protocol]);
-        if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
-            err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
-                          udp_v4_early_demux, skb);
-            if (unlikely(err))
-                goto drop_error;
-            /* must reload iph, skb->head might have changed */
-            iph = ip_hdr(skb);
+        switch (iph->protocol) {
+        case IPPROTO_TCP:
+            if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
+                tcp_v4_early_demux(skb);
+
+                /* must reload iph, skb->head might have changed */
+                iph = ip_hdr(skb);
+            }
+            break;
+        case IPPROTO_UDP:
+            if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
+                err = udp_v4_early_demux(skb);
+                if (unlikely(err))
+                    goto drop_error;
+
+                /* must reload iph, skb->head might have changed */
+                iph = ip_hdr(skb);
+            }
+            break;
         }
     }
......
@@ -361,61 +361,6 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
     return ret;
 }

-static void proc_configure_early_demux(int enabled, int protocol)
-{
-    struct net_protocol *ipprot;
-#if IS_ENABLED(CONFIG_IPV6)
-    struct inet6_protocol *ip6prot;
-#endif
-
-    rcu_read_lock();
-
-    ipprot = rcu_dereference(inet_protos[protocol]);
-    if (ipprot)
-        ipprot->early_demux = enabled ? ipprot->early_demux_handler :
-                        NULL;
-
-#if IS_ENABLED(CONFIG_IPV6)
-    ip6prot = rcu_dereference(inet6_protos[protocol]);
-    if (ip6prot)
-        ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
-                         NULL;
-#endif
-    rcu_read_unlock();
-}
-
-static int proc_tcp_early_demux(struct ctl_table *table, int write,
-                void *buffer, size_t *lenp, loff_t *ppos)
-{
-    int ret = 0;
-
-    ret = proc_dointvec(table, write, buffer, lenp, ppos);
-
-    if (write && !ret) {
-        int enabled = init_net.ipv4.sysctl_tcp_early_demux;
-
-        proc_configure_early_demux(enabled, IPPROTO_TCP);
-    }
-
-    return ret;
-}
-
-static int proc_udp_early_demux(struct ctl_table *table, int write,
-                void *buffer, size_t *lenp, loff_t *ppos)
-{
-    int ret = 0;
-
-    ret = proc_dointvec(table, write, buffer, lenp, ppos);
-
-    if (write && !ret) {
-        int enabled = init_net.ipv4.sysctl_udp_early_demux;
-
-        proc_configure_early_demux(enabled, IPPROTO_UDP);
-    }
-
-    return ret;
-}
-
 static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
                          int write, void *buffer,
                          size_t *lenp, loff_t *ppos)
@@ -727,14 +672,14 @@ static struct ctl_table ipv4_net_table[] = {
         .data         = &init_net.ipv4.sysctl_udp_early_demux,
         .maxlen       = sizeof(int),
         .mode         = 0644,
-        .proc_handler = proc_udp_early_demux
+        .proc_handler = proc_douintvec_minmax,
     },
     {
         .procname     = "tcp_early_demux",
         .data         = &init_net.ipv4.sysctl_tcp_early_demux,
         .maxlen       = sizeof(int),
         .mode         = 0644,
-        .proc_handler = proc_tcp_early_demux
+        .proc_handler = proc_douintvec_minmax,
     },
     {
         .procname     = "nexthop_compat_mode",
......
@@ -44,21 +44,25 @@
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>

-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
+void udp_v6_early_demux(struct sk_buff *);
+void tcp_v6_early_demux(struct sk_buff *);
 static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
                 struct sk_buff *skb)
 {
-    void (*edemux)(struct sk_buff *skb);
-
-    if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
-        const struct inet6_protocol *ipprot;
-
-        ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
-        if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
-            INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
-                    udp_v6_early_demux, skb);
+    if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+        !skb_dst(skb) && !skb->sk) {
+        switch (ipv6_hdr(skb)->nexthdr) {
+        case IPPROTO_TCP:
+            if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
+                tcp_v6_early_demux(skb);
+            break;
+        case IPPROTO_UDP:
+            if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
+                udp_v6_early_demux(skb);
+            break;
+        }
     }
+
     if (!skb_valid_dst(skb))
         ip6_route_input(skb);
 }
......
@@ -539,6 +539,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                      struct raw6_sock *rp)
 {
+    struct ipv6_txoptions *opt;
     struct sk_buff *skb;
     int err = 0;
     int offset;
@@ -556,6 +557,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,

     offset = rp->offset;
     total_len = inet_sk(sk)->cork.base.length;
+    opt = inet6_sk(sk)->cork.opt;
+    total_len -= opt ? opt->opt_flen : 0;
+
     if (offset >= total_len - 1) {
         err = -EINVAL;
         ip6_flush_pending_frames(sk);
......
@@ -1820,7 +1820,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
     goto discard_it;
 }

-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
+void tcp_v6_early_demux(struct sk_buff *skb)
 {
     const struct ipv6hdr *hdr;
     const struct tcphdr *th;
@@ -2171,12 +2171,7 @@ struct proto tcpv6_prot = {
 };
 EXPORT_SYMBOL_GPL(tcpv6_prot);

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol tcpv6_protocol = {
-    .early_demux         = tcp_v6_early_demux,
-    .early_demux_handler = tcp_v6_early_demux,
+static const struct inet6_protocol tcpv6_protocol = {
     .handler     = tcp_v6_rcv,
     .err_handler = tcp_v6_err,
     .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
......
@@ -1026,7 +1026,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
     return NULL;
 }

-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
+void udp_v6_early_demux(struct sk_buff *skb)
 {
     struct net *net = dev_net(skb->dev);
     const struct udphdr *uh;
@@ -1639,12 +1639,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
     return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol udpv6_protocol = {
-    .early_demux         = udp_v6_early_demux,
-    .early_demux_handler = udp_v6_early_demux,
+static const struct inet6_protocol udpv6_protocol = {
     .handler     = udpv6_rcv,
     .err_handler = udpv6_err,
     .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
......
@@ -2175,9 +2175,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
     u64 value;

     do {
-        start = u64_stats_fetch_begin(&rxstats->syncp);
+        start = u64_stats_fetch_begin_irq(&rxstats->syncp);
         value = rxstats->msdu[tid];
-    } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+    } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));

     return value;
 }
@@ -2241,9 +2241,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
     u64 value;

     do {
-        start = u64_stats_fetch_begin(&rxstats->syncp);
+        start = u64_stats_fetch_begin_irq(&rxstats->syncp);
         value = rxstats->bytes;
-    } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+    } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));

     return value;
 }
......
@@ -1078,9 +1078,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
         p = per_cpu_ptr(mdev->stats, i);

         do {
-            start = u64_stats_fetch_begin(&p->syncp);
+            start = u64_stats_fetch_begin_irq(&p->syncp);
             local = p->stats;
-        } while (u64_stats_fetch_retry(&p->syncp, start));
+        } while (u64_stats_fetch_retry_irq(&p->syncp, start));

         stats->rx_packets += local.rx_packets;
         stats->rx_bytes += local.rx_bytes;
......
@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,

 skip:
         if (!ingress) {
-            notify_and_destroy(net, skb, n, classid,
-                       rtnl_dereference(dev->qdisc), new);
+            old = rtnl_dereference(dev->qdisc);
             if (new && !new->ops->attach)
                 qdisc_refcount_inc(new);
             rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
+            notify_and_destroy(net, skb, n, classid, old, new);

             if (new && new->ops->attach)
                 new->ops->attach(new);
         } else {
......