Commit a90f704a authored by David S. Miller

Merge branch 'net-Optimize-the-qed-allocations-inside-kdump-kernel'

Bhupesh Sharma says:

====================
net: Optimize the qed* allocations inside kdump kernel

Changes since v1:
----------------
- v1 can be seen here: http://lists.infradead.org/pipermail/kexec/2020-May/024935.html
- Addressed review comments received on v1:
  * Removed unnecessary parentheses.
  * Used a different macro for minimum RX/TX ring count value in kdump
    kernel.

Kdump kernels run under a severe memory constraint: their sole job is to
save the crash-dump vmcore reliably when the primary kernel panics or
hangs, so large memory allocations made by a network driver can cause the
crash kernel itself to panic with an OOM.

The qed* drivers take up approximately 214 MB of memory when run in the
kdump kernel with the default configuration settings presently used in
the driver. With a usual crashkernel size of 512 MB, this allocation
amounts to almost half of the total crashkernel memory.

See some logs obtained via memstrack tool (see [1]) below:
 dracut-pre-pivot[676]: ======== Report format module_summary: ========
 dracut-pre-pivot[676]: Module qed using 149.6MB (2394 pages), peak allocation 149.6MB (2394 pages)
 dracut-pre-pivot[676]: Module qede using 65.3MB (1045 pages), peak allocation 65.3MB (1045 pages)

This patchset reduces the overall memory allocation profile of the qed*
drivers when they run in the kdump kernel. With these optimizations we
see a saving of approximately 85 MB in the kdump kernel:
 dracut-pre-pivot[671]: ======== Report format module_summary: ========
 dracut-pre-pivot[671]: Module qed using 124.6MB (1993 pages), peak allocation 124.7MB (1995 pages)
 <..snip..>
 dracut-pre-pivot[671]: Module qede using 4.6MB (73 pages), peak allocation 4.6MB (74 pages)

The kdump kernel can then save the vmcore successfully via both the ssh
and nfs interfaces.

This patchset contains two patches:
[PATCH 1/2] - Reduces the default TX and RX ring counts in the kdump kernel.
[PATCH 2/2] - Disables the qed SRIOV feature in the kdump kernel (as it is
              normally not a supported kdump target for saving the vmcore).

[1]. Memstrack tool: https://github.com/ryncsn/memstrack
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
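
Both patches rely on the same check: is_kdump_kernel() from
<linux/crash_dump.h> identifies the capture kernel, and the driver then
falls back to the smallest workable configuration. The following
stand-alone C sketch (not driver code; the stubbed is_kdump_kernel() and
the main() harness are illustrative stand-ins) mirrors the ring-size
selection added in the qede hunks below.

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the helper declared in <linux/crash_dump.h>;
 * a real kdump capture kernel reports true here.
 */
static bool is_kdump_kernel(void)
{
	return true;
}

/* Ring-size constants mirroring the qede defines touched below. */
#define NUM_RX_BDS_DEF		((1u << 10) - 1)	/* 1023 */
#define NUM_TX_BDS_DEF		((1u << 13) - 1)	/* 8191 */
#define NUM_RX_BDS_KDUMP_MIN	63
#define NUM_TX_BDS_KDUMP_MIN	63

int main(void)
{
	unsigned int rx_bds, tx_bds;

	/* Same shape as the qede_alloc_etherdev() change further down:
	 * pick the minimal ring sizes only when capturing a crash dump.
	 */
	if (is_kdump_kernel()) {
		rx_bds = NUM_RX_BDS_KDUMP_MIN;
		tx_bds = NUM_TX_BDS_KDUMP_MIN;
	} else {
		rx_bds = NUM_RX_BDS_DEF;
		tx_bds = NUM_TX_BDS_DEF;
	}

	printf("rx ring: %u BDs, tx ring: %u BDs\n", rx_bds, tx_bds);
	return 0;
}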
drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,6 +32,7 @@
 #ifndef _QED_SRIOV_H
 #define _QED_SRIOV_H
+#include <linux/crash_dump.h>
 #include <linux/types.h>
 #include "qed_vf.h"
@@ -40,9 +41,12 @@
 #define QED_VF_ARRAY_LENGTH (3)
 
 #ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev)		((cdev)->b_is_vf)
-#define IS_PF(cdev)		(!((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev)		(is_kdump_kernel() ? \
+				 (0) : ((cdev)->b_is_vf))
+#define IS_PF(cdev)		(is_kdump_kernel() ? \
+				 (1) : !((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn)	(is_kdump_kernel() ? \
+				 (0) : !!((p_hwfn)->cdev->p_iov_info))
 #else
 #define IS_VF(cdev)		(0)
 #define IS_PF(cdev)		(1)
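
With the hunk above, the IS_VF()/IS_PF()/IS_PF_SRIOV() macros answer as a
plain PF with no IOV info whenever the driver runs in a kdump kernel, so
every SRIOV path gated on them is skipped without touching the callers. A
stand-alone sketch of that behaviour, using toy structs and a stubbed
is_kdump_kernel() (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend we are running inside a kdump capture kernel. */
static bool is_kdump_kernel(void)
{
	return true;
}

/* Toy stand-ins for the structures the macros dereference. */
struct qed_dev  { bool b_is_vf; void *p_iov_info; };
struct qed_hwfn { struct qed_dev *cdev; };

/* The macros as changed in the hunk above. */
#define IS_VF(cdev)		(is_kdump_kernel() ? \
				 (0) : ((cdev)->b_is_vf))
#define IS_PF(cdev)		(is_kdump_kernel() ? \
				 (1) : !((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)	(is_kdump_kernel() ? \
				 (0) : !!((p_hwfn)->cdev->p_iov_info))

int main(void)
{
	/* Even with VF/IOV state populated, kdump forces PF-only answers. */
	struct qed_dev cdev = { .b_is_vf = true, .p_iov_info = &cdev };
	struct qed_hwfn hwfn = { .cdev = &cdev };

	printf("IS_VF=%d IS_PF=%d IS_PF_SRIOV=%d\n",
	       (int)IS_VF(&cdev), IS_PF(&cdev), IS_PF_SRIOV(&hwfn));
	return 0;
}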
drivers/net/ethernet/qlogic/qede/qede.h
@@ -575,12 +575,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
 #define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
 #define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
 #define NUM_RX_BDS_MIN		128
+#define NUM_RX_BDS_KDUMP_MIN	63
 #define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)
 
 #define TX_RING_SIZE_POW	13
 #define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
 #define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
 #define NUM_TX_BDS_MIN		128
+#define NUM_TX_BDS_KDUMP_MIN	63
 #define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
 
 #define QEDE_MIN_PKT_LEN	64
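
For scale, NUM_RX_BDS_DEF above expands to (u16)BIT(10) - 1 = 1023 buffer
descriptors per RX queue and NUM_TX_BDS_DEF to NUM_TX_BDS_MAX =
BIT(13) - 1 = 8191 per TX queue, so the kdump minimum of 63 BDs is roughly
1/16th of the default RX ring and well under 1% of the default TX ring.
The value 63 also appears to follow the size-minus-one convention of the
_MAX defines above, i.e. a 64-entry ring with one BD kept unused.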
drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include <linux/crash_dump.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/version.h>
@@ -715,8 +716,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 	edev->dp_module = dp_module;
 	edev->dp_level = dp_level;
 	edev->ops = qed_ops;
-	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
-	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+	if (is_kdump_kernel()) {
+		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+	} else {
+		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+	}
 
 	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
 		info->num_queues, info->num_queues);
@@ -1207,7 +1214,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	case QEDE_PRIVATE_VF:
 		if (debug & QED_LOG_VERBOSE_MASK)
 			dev_err(&pdev->dev, "Probing a VF\n");
-		is_vf = true;
+		is_vf = is_kdump_kernel() ? false : true;
 		break;
 	default:
 		if (debug & QED_LOG_VERBOSE_MASK)