提交 f242d93a 编写于 作者: Leon Romanovsky 提交者: Doug Ledford

IB/hfi1: Avoid large frame size warning

When CONFIG_FRAME_WARN is set to 1024 bytes, which is useful to find
stack consumers, we get a warning in hfi1 driver.

drivers/infiniband/hw/hfi1/affinity.c: In function
‘hfi1_get_proc_affinity’:
drivers/infiniband/hw/hfi1/affinity.c:415:1: warning: the frame size of
1056 bytes is larger than 1024 bytes [-Wframe-larger-than=]

This change removes unneeded buf[1024] declaration and usage.

Fixes: f48ad614 ("IB/hfi1: Move driver out of staging")
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
上级 a8b7da58
......@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
current->pid, current->comm, buf);
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
......@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
current->pid, current->comm, buf);
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
goto done;
}
......@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
......@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
......@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册