OpenHarmony / kernel_linux
Commit 4c4de7d3
Authored: Sep 29, 2017
Author: Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: a19b2e3d, 441430eb

Showing 42 changed files with 363 additions and 257 deletions (+363 −257)

Documentation/cpu-freq/index.txt              +0   −2
drivers/acpi/apei/ghes.c                      +9   −7
drivers/base/power/opp/core.c                 +7   −0
drivers/cpufreq/cpufreq-dt-platdev.c          +4   −0
drivers/infiniband/core/security.c            +3   −1
drivers/infiniband/core/uverbs_cmd.c          +7   −7
drivers/infiniband/hw/hfi1/chip.c             +78  −23
drivers/infiniband/hw/hfi1/chip.h             +2   −1
drivers/infiniband/hw/hfi1/eprom.c            +15  −5
drivers/infiniband/hw/hfi1/file_ops.c         +22  −19
drivers/infiniband/hw/hfi1/pcie.c             +21  −29
drivers/infiniband/hw/hfi1/platform.c         +3   −1
drivers/infiniband/hw/mlx5/main.c             +5   −5
drivers/infiniband/hw/mlx5/mem.c              +17  −30
drivers/infiniband/hw/mlx5/mr.c               +17  −10
drivers/infiniband/hw/nes/nes_verbs.c         +2   −2
drivers/infiniband/ulp/ipoib/ipoib_ib.c       +0   −13
drivers/infiniband/ulp/ipoib/ipoib_main.c     +11  −4
drivers/infiniband/ulp/ipoib/ipoib_vlan.c     +22  −8
drivers/infiniband/ulp/iser/iser_memory.c     +1   −1
fs/iomap.c                                    +7   −7
fs/xfs/libxfs/xfs_ag_resv.c                   +10  −2
fs/xfs/libxfs/xfs_bmap.c                      +2   −15
fs/xfs/xfs_aops.c                             +2   −1
fs/xfs/xfs_bmap_util.c                        +13  −1
fs/xfs/xfs_buf.c                              +0   −2
fs/xfs/xfs_error.c                            +1   −1
fs/xfs/xfs_file.c                             +21  −18
fs/xfs/xfs_inode.c                            +5   −3
fs/xfs/xfs_inode_item.c                       +1   −1
fs/xfs/xfs_ioctl.c                            +2   −1
fs/xfs/xfs_iomap.c                            +5   −2
fs/xfs/xfs_iomap.h                            +1   −1
fs/xfs/xfs_pnfs.c                             +1   −1
fs/xfs/xfs_super.c                            +10  −0
include/rdma/ib_verbs.h                       +3   −3
include/uapi/rdma/ib_user_verbs.h             +1   −1
kernel/events/ring_buffer.c                   +15  −5
kernel/seccomp.c                              +16  −7
net/bluetooth/Kconfig                         +0   −10
net/bluetooth/hci_sock.c                      +0   −6
net/sunrpc/xprtrdma/frwr_ops.c                +1   −1

Documentation/cpu-freq/index.txt

@@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats.
 index.txt - File index, Mailing list and Links (this document)
 intel-pstate.txt - Intel pstate cpufreq driver specific file.
 pcc-cpufreq.txt - PCC cpufreq driver specific file.

drivers/acpi/apei/ghes.c

@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
         }
         ghes_do_proc(ghes, ghes->estatus);
 
+out:
+        ghes_clear_estatus(ghes);
+
+        if (rc == -ENOENT)
+                return rc;
+
         /*
          * GHESv2 type HEST entries introduce support for error acknowledgment,
          * so only acknowledge the error if this support is present.
          */
-        if (is_hest_type_generic_v2(ghes)) {
-                rc = ghes_ack_error(ghes->generic_v2);
-                if (rc)
-                        return rc;
-        }
-out:
-        ghes_clear_estatus(ghes);
+        if (is_hest_type_generic_v2(ghes))
+                return ghes_ack_error(ghes->generic_v2);
+
         return rc;
 }

drivers/base/power/opp/core.c

@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
 
         opp->available = availability_req;
 
+        dev_pm_opp_get(opp);
+        mutex_unlock(&opp_table->lock);
+
         /* Notify the change of the OPP availability */
         if (availability_req)
                 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,

@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
                 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE,
                                              opp);
 
+        dev_pm_opp_put(opp);
+        goto put_table;
+
 unlock:
         mutex_unlock(&opp_table->lock);
+put_table:
         dev_pm_opp_put_opp_table(opp_table);
         return r;
 }

drivers/cpufreq/cpufreq-dt-platdev.c

@@ -118,6 +118,10 @@ static const struct of_device_id blacklist[] __initconst = {
         { .compatible = "sigma,tango4", },
 
+        { .compatible = "ti,am33xx", },
+        { .compatible = "ti,am43", },
+        { .compatible = "ti,dra7", },
+
         { }
 };

drivers/infiniband/core/security.c

@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
         atomic_set(&qp->qp_sec->error_list_count, 0);
         init_completion(&qp->qp_sec->error_complete);
         ret = security_ib_alloc_security(&qp->qp_sec->security);
-        if (ret)
+        if (ret) {
                 kfree(qp->qp_sec);
+                qp->qp_sec = NULL;
+        }
 
         return ret;
 }

drivers/infiniband/core/uverbs_cmd.c

@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
         resp.raw_packet_caps = attr.raw_packet_caps;
         resp.response_length += sizeof(resp.raw_packet_caps);
 
-        if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+        if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
                 goto end;
 
-        resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
-        resp.xrq_caps.max_num_tags      = attr.xrq_caps.max_num_tags;
-        resp.xrq_caps.max_ops           = attr.xrq_caps.max_ops;
-        resp.xrq_caps.max_sge           = attr.xrq_caps.max_sge;
-        resp.xrq_caps.flags             = attr.xrq_caps.flags;
-        resp.response_length += sizeof(resp.xrq_caps);
+        resp.tm_caps.max_rndv_hdr_size  = attr.tm_caps.max_rndv_hdr_size;
+        resp.tm_caps.max_num_tags       = attr.tm_caps.max_num_tags;
+        resp.tm_caps.max_ops            = attr.tm_caps.max_ops;
+        resp.tm_caps.max_sge            = attr.tm_caps.max_sge;
+        resp.tm_caps.flags              = attr.tm_caps.flags;
+        resp.response_length += sizeof(resp.tm_caps);
 end:
         err = ib_copy_to_udata(ucore, &resp, resp.response_length);
         return err;

drivers/infiniband/hw/hfi1/chip.c

@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
 static int thermal_init(struct hfi1_devdata *dd);
 
 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+                                            int msecs);
 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                   int msecs);
 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);

@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
         u64 regs[CCE_NUM_INT_CSRS];
         u32 bit;
         int i;
+        irqreturn_t handled = IRQ_NONE;
 
         this_cpu_inc(*dd->int_counter);

@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
         for_each_set_bit(bit, (unsigned long *)&regs[0],
                          CCE_NUM_INT_CSRS * 64) {
                 is_interrupt(dd, bit);
+                handled = IRQ_HANDLED;
         }
 
-        return IRQ_HANDLED;
+        return handled;
 }
 
 static irqreturn_t sdma_interrupt(int irq, void *data)

@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
 }
 
-void reset_qsfp(struct hfi1_pportdata *ppd)
+int reset_qsfp(struct hfi1_pportdata *ppd)
 {
         struct hfi1_devdata *dd = ppd->dd;
         u64 mask, qsfp_mask;

@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
          * for alarms and warnings
          */
         set_qsfp_int_n(ppd, 1);
+
+        /*
+         * After the reset, AOC transmitters are enabled by default. They need
+         * to be turned off to complete the QSFP setup before they can be
+         * enabled again.
+         */
+        return set_qsfp_tx(ppd, 0);
 }
 
 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,

@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 {
         struct hfi1_devdata *dd = ppd->dd;
         u32 previous_state;
+        int offline_state_ret;
         int ret;
 
         update_lcb_cache(dd);

@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
         ppd->offline_disabled_reason =
                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
 
-        /*
-         * Wait for offline transition. It can take a while for
-         * the link to go down.
-         */
-        ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
-        if (ret < 0)
-                return ret;
-
-        /*
-         * Now in charge of LCB - must be after the physical state is
-         * offline.quiet and before host_link_state is changed.
-         */
-        set_host_lcb_access(dd);
-        write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
-        /* make sure the logical state is also down */
-        ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
-        if (ret)
-                force_logical_link_state_down(ppd);
-
-        ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+        offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+        if (offline_state_ret < 0)
+                return offline_state_ret;
 
+        /* Disabling AOC transmitters */
         if (ppd->port_type == PORT_TYPE_QSFP &&
             ppd->qsfp_info.limiting_active &&
             qsfp_mod_present(ppd)) {

@@ -10364,6 +10359,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
                 }
         }
 
+        /*
+         * Wait for the offline.Quiet transition if it hasn't happened yet. It
+         * can take a while for the link to go down.
+         */
+        if (offline_state_ret != PLS_OFFLINE_QUIET) {
+                ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+                if (ret < 0)
+                        return ret;
+        }
+
+        /*
+         * Now in charge of LCB - must be after the physical state is
+         * offline.quiet and before host_link_state is changed.
+         */
+        set_host_lcb_access(dd);
+        write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+        /* make sure the logical state is also down */
+        ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+        if (ret)
+                force_logical_link_state_down(ppd);
+
+        ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
         /*
          * The LNI has a mandatory wait time after the physical state
          * moves to Offline.Quiet.  The wait time may be different

@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
                 /* went down while attempting link up */
                 check_lni_states(ppd);
+
+                /* The QSFP doesn't need to be reset on LNI failure */
+                ppd->qsfp_info.reset_needed = 0;
         }
 
         /* the active link width (downgrade) is 0 on link down */

@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
         return 0;
 }
 
+/*
+ * wait_phys_link_offline_quiet_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+                                            int msecs)
+{
+        u32 read_state;
+        unsigned long timeout;
+
+        timeout = jiffies + msecs_to_jiffies(msecs);
+        while (1) {
+                read_state = read_physical_state(ppd->dd);
+                if ((read_state & 0xF0) == PLS_OFFLINE)
+                        break;
+                if (time_after(jiffies, timeout)) {
+                        dd_dev_err(ppd->dd,
+                                   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+                                   read_state, msecs);
+                        return -ETIMEDOUT;
+                }
+                usleep_range(1950, 2050); /* sleep 2ms-ish */
+        }
+
+        log_state_transition(ppd, read_state);
+        return read_state;
+}
+
 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
         (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

drivers/infiniband/hw/hfi1/chip.h

@@ -204,6 +204,7 @@
 #define PLS_OFFLINE_READY_TO_QUIET_LT      0x92
 #define PLS_OFFLINE_REPORT_FAILURE         0x93
 #define PLS_OFFLINE_READY_TO_QUIET_BCC     0x94
+#define PLS_OFFLINE_QUIET_DURATION         0x95
 #define PLS_POLLING                        0x20
 #define PLS_POLLING_QUIET                  0x20
 #define PLS_POLLING_ACTIVE                 0x21

@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
 void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
+int reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
 int send_idle_sma(struct hfi1_devdata *dd, u64 message);

drivers/infiniband/hw/hfi1/eprom.c

@@ -204,7 +204,10 @@ int eprom_init(struct hfi1_devdata *dd)
         return ret;
 }
 
-/* magic character sequence that trails an image */
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
 #define IMAGE_TRAIL_MAGIC "egamiAPO"
 
 /* EPROM file types */

@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
 {
         void *buffer;
         void *p;
+        u32 length;
         int ret;
 
         buffer = kmalloc(P1_SIZE, GFP_KERNEL);

@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
                 return ret;
         }
 
-        /* scan for image magic that may trail the actual data */
-        p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
-        if (!p) {
+        /* config partition is valid only if it starts with IMAGE_START_MAGIC */
+        if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
                 kfree(buffer);
                 return -ENOENT;
         }
 
+        /* scan for image magic that may trail the actual data */
+        p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+        if (p)
+                length = p - buffer;
+        else
+                length = P1_SIZE;
+
         *data = buffer;
-        *size = p - buffer;
+        *size = length;
         return 0;
 }

drivers/infiniband/hw/hfi1/file_ops.c

@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
         switch (ret) {
         case 0:
                 ret = setup_base_ctxt(fd, uctxt);
-                if (uctxt->subctxt_cnt) {
-                        /*
-                         * Base context is done (successfully or not), notify
-                         * anybody using a sub-context that is waiting for
-                         * this completion.
-                         */
-                        clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
-                        wake_up(&uctxt->wait);
-                }
+                if (ret)
+                        deallocate_ctxt(uctxt);
                 break;
         case 1:
                 ret = complete_subctxt(fd);

@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
         /* Now allocate the RcvHdr queue and eager buffers. */
         ret = hfi1_create_rcvhdrq(dd, uctxt);
         if (ret)
-                return ret;
+                goto done;
 
         ret = hfi1_setup_eagerbufs(uctxt);
         if (ret)
-                goto setup_failed;
+                goto done;
 
         /* If sub-contexts are enabled, do the appropriate setup */
         if (uctxt->subctxt_cnt)
                 ret = setup_subctxt(uctxt);
         if (ret)
-                goto setup_failed;
+                goto done;
 
         ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
         if (ret)
-                goto setup_failed;
+                goto done;
 
         ret = init_user_ctxt(fd, uctxt);
         if (ret)
-                goto setup_failed;
+                goto done;
 
         user_init(uctxt);

@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
         fd->uctxt = uctxt;
         hfi1_rcd_get(uctxt);
 
-        return 0;
+done:
+        if (uctxt->subctxt_cnt) {
+                /*
+                 * On error, set the failed bit so sub-contexts will clean up
+                 * correctly.
+                 */
+                if (ret)
+                        set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
 
-setup_failed:
-        /* Set the failed bit so sub-context init can do the right thing */
-        set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
-        deallocate_ctxt(uctxt);
+                /*
+                 * Base context is done (successfully or not), notify anybody
+                 * using a sub-context that is waiting for this completion.
+                 */
+                clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+                wake_up(&uctxt->wait);
+        }
 
         return ret;
 }

drivers/infiniband/hw/hfi1/pcie.c

@@ -68,7 +68,7 @@
 /*
  * Code to adjust PCIe capabilities.
  */
-static int tune_pcie_caps(struct hfi1_devdata *);
+static void tune_pcie_caps(struct hfi1_devdata *);
 
 /*
  * Do all the common PCIe setup and initialization.

@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
  */
 int request_msix(struct hfi1_devdata *dd, u32 msireq)
 {
-        int nvec, ret;
+        int nvec;
 
         nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
                                      PCI_IRQ_MSIX | PCI_IRQ_LEGACY);

@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
                 return nvec;
         }
 
-        ret = tune_pcie_caps(dd);
-        if (ret) {
-                dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
-                pci_free_irq_vectors(dd->pcidev);
-                return ret;
-        }
+        tune_pcie_caps(dd);
 
         /* check for legacy IRQ */
         if (nvec == 1 && !dd->pcidev->msix_enabled)

@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
 module_param_named(aspm, aspm_mode, uint, S_IRUGO);
 MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
 
-static int tune_pcie_caps(struct hfi1_devdata *dd)
+static void tune_pcie_caps(struct hfi1_devdata *dd)
 {
         struct pci_dev *parent;
         u16 rc_mpss, rc_mps, ep_mpss, ep_mps;

@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
          * Turn on extended tags in DevCtl in case the BIOS has turned it off
          * to improve WFR SDMA bandwidth
          */
-        ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
-        if (ret) {
-                dd_dev_err(dd, "Unable to read from PCI config\n");
-                return ret;
-        }
-
-        if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+        ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+        if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
                 dd_dev_info(dd, "Enabling PCIe extended tags\n");
                 ectl |= PCI_EXP_DEVCTL_EXT_TAG;
                 ret = pcie_capability_write_word(dd->pcidev,
                                                  PCI_EXP_DEVCTL, ectl);
-                if (ret) {
-                        dd_dev_err(dd, "Unable to write to PCI config\n");
-                        return ret;
-                }
+                if (ret)
+                        dd_dev_info(dd, "Unable to write to PCI config\n");
         }
         /* Find out supported and configured values for parent (root) */
         parent = dd->pcidev->bus->self;

@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
          * The driver cannot perform the tuning if it does not have
          * access to the upstream component.
          */
-        if (!parent)
-                return -EINVAL;
+        if (!parent) {
+                dd_dev_info(dd, "Parent not found\n");
+                return;
+        }
         if (!pci_is_root_bus(parent->bus)) {
                 dd_dev_info(dd, "Parent not root\n");
-                return -EINVAL;
+                return;
+        }
+        if (!pci_is_pcie(parent)) {
+                dd_dev_info(dd, "Parent is not PCI Express capable\n");
+                return;
+        }
+        if (!pci_is_pcie(dd->pcidev)) {
+                dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+                return;
         }
-        if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
-                return -EINVAL;
 
         rc_mpss = parent->pcie_mpss;
         rc_mps = ffs(pcie_get_mps(parent)) - 8;
         /* Find out supported and configured values for endpoint (us) */

@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
                 ep_mrrs = max_mrrs;
                 pcie_set_readrq(dd->pcidev, ep_mrrs);
         }
-
-        return 0;
 }
 
 /* End of PCIe capability tuning */

drivers/infiniband/hw/hfi1/platform.c

@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
          * reuse of stale settings established in our previous pass through.
          */
         if (ppd->qsfp_info.reset_needed) {
-                reset_qsfp(ppd);
+                ret = reset_qsfp(ppd);
+                if (ret)
+                        return ret;
                 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
         } else {
                 ppd->qsfp_info.reset_needed = 1;

drivers/infiniband/hw/mlx5/main.c

@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         }
 
         if (MLX5_CAP_GEN(mdev, tag_matching)) {
-                props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
-                props->xrq_caps.max_num_tags =
+                props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+                props->tm_caps.max_num_tags =
                         (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
-                props->xrq_caps.flags = IB_TM_CAP_RC;
-                props->xrq_caps.max_ops =
+                props->tm_caps.flags = IB_TM_CAP_RC;
+                props->tm_caps.max_ops =
                         1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
-                props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
+                props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
         }
 
         if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {

drivers/infiniband/hw/mlx5/mem.c

@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 {
         unsigned long tmp;
         unsigned long m;
-        int i, k;
-        u64 base = 0;
-        int p = 0;
-        int skip;
-        int mask;
-        u64 len;
-        u64 pfn;
+        u64 base = ~0, p = 0;
+        u64 len, pfn;
+        int i = 0;
         struct scatterlist *sg;
         int entry;
         unsigned long page_shift = umem->page_shift;

@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
         m = find_first_bit(&tmp, BITS_PER_LONG);
         if (max_page_shift)
                 m = min_t(unsigned long, max_page_shift - page_shift, m);
-        skip = 1 << m;
-        mask = skip - 1;
-        i = 0;
+
         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                 len = sg_dma_len(sg) >> page_shift;
                 pfn = sg_dma_address(sg) >> page_shift;
-                for (k = 0; k < len; k++) {
-                        if (!(i & mask)) {
-                                tmp = (unsigned long)pfn;
-                                m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
-                                skip = 1 << m;
-                                mask = skip - 1;
-                                base = pfn;
-                                p = 0;
-                        } else {
-                                if (base + p != pfn) {
-                                        tmp = (unsigned long)p;
-                                        m = find_first_bit(&tmp, BITS_PER_LONG);
-                                        skip = 1 << m;
-                                        mask = skip - 1;
-                                        base = pfn;
-                                        p = 0;
-                                }
-                        }
-                        p++;
-                        i++;
+                if (base + p != pfn) {
+                        /* If either the offset or the new
+                         * base are unaligned update m
+                         */
+                        tmp = (unsigned long)(pfn | p);
+                        if (!IS_ALIGNED(tmp, 1 << m))
+                                m = find_first_bit(&tmp, BITS_PER_LONG);
+
+                        base = pfn;
+                        p = 0;
                 }
+
+                p += len;
+                i += len;
         }
 
         if (i) {

drivers/infiniband/hw/mlx5/mr.c

@@ -47,7 +47,8 @@ enum {
 
 #define MLX5_UMR_ALIGN 2048
 
-static int clean_mr(struct mlx5_ib_mr *mr);
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
                                          update_xlt_flags);
+
                 if (err) {
-                        mlx5_ib_dereg_mr(&mr->ibmr);
+                        dereg_mr(dev, mr);
                         return ERR_PTR(err);
                 }
         }

@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                                   &npages, &page_shift, &ncont, &order);
                 if (err < 0) {
-                        clean_mr(mr);
+                        clean_mr(dev, mr);
                         return err;
                 }
         }

@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                 if (err) {
                         mlx5_ib_warn(dev, "Failed to rereg UMR\n");
                         ib_umem_release(mr->umem);
-                        clean_mr(mr);
+                        clean_mr(dev, mr);
                         return err;
                 }
         }

@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
         }
 }
 
-static int clean_mr(struct mlx5_ib_mr *mr)
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
         int allocated_from_cache = mr->allocated_from_cache;
         int err;

@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
         return 0;
 }
 
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
-        struct mlx5_ib_mr *mr = to_mmr(ibmr);
         int npages = mr->npages;
         struct ib_umem *umem = mr->umem;

@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
         }
 #endif
 
-        clean_mr(mr);
+        clean_mr(dev, mr);
 
         if (umem) {
                 ib_umem_release(umem);

@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
         return 0;
 }
 
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+        struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+        return dereg_mr(dev, mr);
+}
+
 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                                enum ib_mr_type mr_type,
                                u32 max_num_sg)

drivers/infiniband/hw/nes/nes_verbs.c

@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                                             mr->ibmr.iova);
                         set_wqe_32bit_value(wqe->wqe_words,
                                             NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
-                                            mr->ibmr.length);
+                                            lower_32_bits(mr->ibmr.length));
                         set_wqe_32bit_value(wqe->wqe_words,
                                             NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
                         set_wqe_32bit_value(wqe->wqe_words,

@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                                   mr->npages * 8);
 
                         nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
-                                  "length: %d, rkey: %0x, pgl_paddr: %llx, "
+                                  "length: %lld, rkey: %0x, pgl_paddr: %llx, "
                                   "page_list_len: %u, wqe_misc: %x\n",
                                   (unsigned long long) mr->ibmr.iova,
                                   mr->ibmr.length,

drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
          */
         priv->dev->broadcast[8] = priv->pkey >> 8;
         priv->dev->broadcast[9] = priv->pkey & 0xff;
-
-        /*
-         * Update the broadcast address in the priv->broadcast object,
-         * in case it already exists, otherwise no one will do that.
-         */
-        if (priv->broadcast) {
-                spin_lock_irq(&priv->lock);
-                memcpy(priv->broadcast->mcmember.mgid.raw,
-                       priv->dev->broadcast + 4,
-                       sizeof(union ib_gid));
-                spin_unlock_irq(&priv->lock);
-        }
-
         return 0;
 }

drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
 {
         struct ipoib_dev_priv *priv;
         struct ib_port_attr attr;
+        struct rdma_netdev *rn;
         int result = -ENOMEM;
 
         priv = ipoib_intf_alloc(hca, port, format);

@@ -2279,7 +2280,8 @@ static struct net_device *ipoib_add_port(const char *format,
         ipoib_dev_cleanup(priv->dev);
 
 device_init_failed:
-        free_netdev(priv->dev);
+        rn = netdev_priv(priv->dev);
+        rn->free_rdma_netdev(priv->dev);
         kfree(priv);
 
 alloc_mem_failed:

@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                 return;
 
         list_for_each_entry_safe(priv, tmp, dev_list, list) {
-                struct rdma_netdev *rn = netdev_priv(priv->dev);
+                struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
 
                 ib_unregister_event_handler(&priv->event_handler);
                 flush_workqueue(ipoib_workqueue);

@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                 unregister_netdev(priv->dev);
                 mutex_unlock(&priv->sysfs_mutex);
 
-                rn->free_rdma_netdev(priv->dev);
+                parent_rn->free_rdma_netdev(priv->dev);
+
+                list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+                        struct rdma_netdev *child_rn;
 
-                list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+                        child_rn = netdev_priv(cpriv->dev);
+                        child_rn->free_rdma_netdev(cpriv->dev);
                         kfree(cpriv);
+                }
 
                 kfree(priv);
         }

drivers/infiniband/ulp/ipoib/ipoib_vlan.c

@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
                 return restart_syscall();
         }
 
-        priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-        if (!priv) {
+        if (!down_write_trylock(&ppriv->vlan_rwsem)) {
                 rtnl_unlock();
                 mutex_unlock(&ppriv->sysfs_mutex);
-                return -ENOMEM;
+                return restart_syscall();
         }
 
-        down_write(&ppriv->vlan_rwsem);
+        priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+        if (!priv) {
+                result = -ENOMEM;
+                goto out;
+        }
 
         /*
          * First ensure this isn't a duplicate. We check the parent device and

@@ -175,8 +178,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
         rtnl_unlock();
         mutex_unlock(&ppriv->sysfs_mutex);
 
-        if (result) {
-                free_netdev(priv->dev);
+        if (result && priv) {
+                struct rdma_netdev *rn;
+
+                rn = netdev_priv(priv->dev);
+                rn->free_rdma_netdev(priv->dev);
                 kfree(priv);
         }

@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
                 return restart_syscall();
         }
 
-        down_write(&ppriv->vlan_rwsem);
+        if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+                rtnl_unlock();
+                mutex_unlock(&ppriv->sysfs_mutex);
+                return restart_syscall();
+        }
+
         list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
                 if (priv->pkey == pkey &&
                     priv->child_type == IPOIB_LEGACY_CHILD) {

@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
         mutex_unlock(&ppriv->sysfs_mutex);
 
         if (dev) {
-                free_netdev(dev);
+                struct rdma_netdev *rn;
+
+                rn = netdev_priv(dev);
+                rn->free_rdma_netdev(priv->dev);
                 kfree(priv);
                 return 0;
         }

drivers/infiniband/ulp/iser/iser_memory.c

@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
 {
         int i;
 
-        iser_err("page vec npages %d data length %d\n",
+        iser_err("page vec npages %d data length %lld\n",
                  page_vec->npages, page_vec->fake_mr.length);
         for (i = 0; i < page_vec->npages; i++)
                 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);

fs/iomap.c

@@ -1009,6 +1009,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 WARN_ON_ONCE(ret);
                 ret = 0;
 
+        if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+            !inode->i_sb->s_dio_done_wq) {
+                ret = sb_init_dio_done_wq(inode->i_sb);
+                if (ret < 0)
+                        goto out_free_dio;
+        }
+
         inode_dio_begin(inode);
 
         blk_start_plug(&plug);

@@ -1031,13 +1038,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         if (ret < 0)
                 iomap_dio_set_error(dio, ret);
 
-        if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
-            !inode->i_sb->s_dio_done_wq) {
-                ret = sb_init_dio_done_wq(inode->i_sb);
-                if (ret < 0)
-                        iomap_dio_set_error(dio, ret);
-        }
-
         if (!atomic_dec_and_test(&dio->ref)) {
                 if (!is_sync_kiocb(iocb))
                         return -EIOCBQUEUED;

fs/xfs/libxfs/xfs_ag_resv.c

@@ -156,7 +156,8 @@ __xfs_ag_resv_free(
         trace_xfs_ag_resv_free(pag, type, 0);
 
         resv = xfs_perag_resv(pag, type);
-        pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+        if (pag->pag_agno == 0)
+                pag->pag_mount->m_ag_max_usable += resv->ar_asked;
         /*
          * AGFL blocks are always considered "free", so whatever
          * was reserved at mount time must be given back at umount.

@@ -216,7 +217,14 @@ __xfs_ag_resv_init(
                 return error;
         }
 
-        mp->m_ag_max_usable -= ask;
+        /*
+         * Reduce the maximum per-AG allocation length by however much we're
+         * trying to reserve for an AG. Since this is a filesystem-wide
+         * counter, we only make the adjustment for AG 0. This assumes that
+         * there aren't any AGs hungrier for per-AG reservation than AG 0.
+         */
+        if (pag->pag_agno == 0)
+                mp->m_ag_max_usable -= ask;
 
         resv = xfs_perag_resv(pag, type);
         resv->ar_asked = ask;

fs/xfs/libxfs/xfs_bmap.c

@@ -49,7 +49,6 @@
 #include "xfs_rmap.h"
 #include "xfs_ag_resv.h"
 #include "xfs_refcount.h"
-#include "xfs_rmap_btree.h"
 #include "xfs_icache.h"

@@ -192,12 +191,8 @@ xfs_bmap_worst_indlen(
         int             maxrecs;        /* maximum record count at this level */
         xfs_mount_t     *mp;            /* mount structure */
         xfs_filblks_t   rval;           /* return value */
-        xfs_filblks_t   orig_len;
 
         mp = ip->i_mount;
-
-        /* Calculate the worst-case size of the bmbt. */
-        orig_len = len;
         maxrecs = mp->m_bmap_dmxr[0];
         for (level = 0, rval = 0;
              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);

@@ -205,20 +200,12 @@ xfs_bmap_worst_indlen(
                 len += maxrecs - 1;
                 do_div(len, maxrecs);
                 rval += len;
-                if (len == 1) {
-                        rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+                if (len == 1)
+                        return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
                                 level - 1;
-                        break;
-                }
                 if (level == 0)
                         maxrecs = mp->m_bmap_dmxr[1];
         }
-
-        /* Calculate the worst-case size of the rmapbt. */
-        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
-                rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
-                                mp->m_rmap_maxlevels;
-
         return rval;
 }

fs/xfs/xfs_aops.c

@@ -343,7 +343,8 @@ xfs_end_io(
                 error = xfs_reflink_end_cow(ip, offset, size);
                 break;
         case XFS_IO_UNWRITTEN:
-                error = xfs_iomap_write_unwritten(ip, offset, size);
+                /* writeback should never update isize */
+                error = xfs_iomap_write_unwritten(ip, offset, size, false);
                 break;
         default:
                 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

fs/xfs/xfs_bmap_util.c

@@ -1459,7 +1459,19 @@ xfs_shift_file_space(
                 return error;
 
         /*
-         * The extent shiting code works on extent granularity. So, if
+         * Clean out anything hanging around in the cow fork now that
+         * we've flushed all the dirty data out to disk to avoid having
+         * CoW extents at the wrong offsets.
+         */
+        if (xfs_is_reflink_inode(ip)) {
+                error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
+                                true);
+                if (error)
+                        return error;
+        }
+
+        /*
+         * The extent shifting code works on extent granularity. So, if
          * stop_fsb is not the starting block of extent, we need to split
          * the extent at stop_fsb.
          */

fs/xfs/xfs_buf.c

@@ -1258,8 +1258,6 @@ xfs_buf_ioapply_map(
         int             size;
         int             offset;
 
-        total_nr_pages = bp->b_page_count;
-
         /* skip the pages in the buffer before the start offset */
         page_index = 0;
         offset = *buf_offset;

fs/xfs/xfs_error.c

@@ -347,7 +347,7 @@ xfs_verifier_error(
 {
         struct xfs_mount *mp = bp->b_target->bt_mount;
 
-        xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
+        xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
                   bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
                   __return_address, bp->b_ops->name, bp->b_bn);

fs/xfs/xfs_file.c

@@ -58,7 +58,7 @@ xfs_zero_range(
         xfs_off_t               count,
         bool                    *did_zero)
 {
-        return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
+        return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
 }
 
 int

@@ -377,8 +377,6 @@ xfs_file_aio_write_checks(
          */
         spin_lock(&ip->i_flags_lock);
         if (iocb->ki_pos > i_size_read(inode)) {
-                bool    zero = false;
-
                 spin_unlock(&ip->i_flags_lock);
 
                 if (!drained_dio) {
                         if (*iolock == XFS_IOLOCK_SHARED) {

@@ -399,7 +397,7 @@ xfs_file_aio_write_checks(
                         drained_dio = true;
                         goto restart;
                 }
-                error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
+                error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL);
                 if (error)
                         return error;
         } else

@@ -436,7 +434,6 @@ xfs_dio_write_end_io(
         struct inode            *inode = file_inode(iocb->ki_filp);
         struct xfs_inode        *ip = XFS_I(inode);
         loff_t                  offset = iocb->ki_pos;
-        bool                    update_size = false;
         int                     error = 0;
 
         trace_xfs_end_io_direct_write(ip, offset, size);

@@ -447,6 +444,21 @@ xfs_dio_write_end_io(
         if (size <= 0)
                 return size;
 
+        if (flags & IOMAP_DIO_COW) {
+                error = xfs_reflink_end_cow(ip, offset, size);
+                if (error)
+                        return error;
+        }
+
+        /*
+         * Unwritten conversion updates the in-core isize after extent
+         * conversion but before updating the on-disk size. Updating isize any
+         * earlier allows a racing dio read to find unwritten extents before
+         * they are converted.
+         */
+        if (flags & IOMAP_DIO_UNWRITTEN)
+                return xfs_iomap_write_unwritten(ip, offset, size, true);
+
         /*
          * We need to update the in-core inode size here so that we don't end up
          * with the on-disk inode size being outside the in-core inode size. We

@@ -461,20 +473,11 @@ xfs_dio_write_end_io(
         spin_lock(&ip->i_flags_lock);
         if (offset + size > i_size_read(inode)) {
                 i_size_write(inode, offset + size);
-                update_size = true;
-        }
-        spin_unlock(&ip->i_flags_lock);
-
-        if (flags & IOMAP_DIO_COW) {
-                error = xfs_reflink_end_cow(ip, offset, size);
-                if (error)
-                        return error;
-        }
-
-        if (flags & IOMAP_DIO_UNWRITTEN)
-                error = xfs_iomap_write_unwritten(ip, offset, size);
-        else if (update_size)
+                spin_unlock(&ip->i_flags_lock);
                 error = xfs_setfilesize(ip, offset, size);
+        } else {
+                spin_unlock(&ip->i_flags_lock);
+        }
 
         return error;
 }

fs/xfs/xfs_inode.c

@@ -1624,10 +1624,12 @@ xfs_itruncate_extents(
                 goto out;
 
         /*
-         * Clear the reflink flag if we truncated everything.
+         * Clear the reflink flag if there are no data fork blocks and
+         * there are no extents staged in the cow fork.
          */
-        if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
-                ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+        if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
+                if (ip->i_d.di_nblocks == 0)
+                        ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
                 xfs_inode_clear_cowblocks_tag(ip);
         }

fs/xfs/xfs_inode_item.c

@@ -745,7 +745,7 @@ xfs_iflush_done(
                  */
                 iip = INODE_ITEM(blip);
                 if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
-                    lip->li_flags & XFS_LI_FAILED)
+                    (blip->li_flags & XFS_LI_FAILED))
                         need_ail++;
 
                 blip = next;

fs/xfs/xfs_ioctl.c

@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
         int                     *join_flags)
 {
         struct inode            *inode = VFS_I(ip);
+        struct super_block      *sb = inode->i_sb;
         int                     error;
 
         *join_flags = 0;

@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
         if (fa->fsx_xflags & FS_XFLAG_DAX) {
                 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
                         return -EINVAL;
-                if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
+                if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
                         return -EINVAL;
         }

fs/xfs/xfs_iomap.c

@@ -829,7 +829,8 @@ int
 xfs_iomap_write_unwritten(
         xfs_inode_t     *ip,
         xfs_off_t       offset,
-        xfs_off_t       count)
+        xfs_off_t       count,
+        bool            update_isize)
 {
         xfs_mount_t     *mp = ip->i_mount;
         xfs_fileoff_t   offset_fsb;

@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten(
         xfs_trans_t     *tp;
         xfs_bmbt_irec_t imap;
         struct xfs_defer_ops dfops;
+        struct inode    *inode = VFS_I(ip);
         xfs_fsize_t     i_size;
         uint            resblks;
         int             error;

@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten(
                 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                 if (i_size > offset + count)
                         i_size = offset + count;
-
+                if (update_isize && i_size > i_size_read(inode))
+                        i_size_write(inode, i_size);
                 i_size = xfs_new_eof(ip, i_size);
                 if (i_size) {
                         ip->i_d.di_size = i_size;

fs/xfs/xfs_iomap.h

@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
                         struct xfs_bmbt_irec *, int);
 int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
                         struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 
 void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
                 struct xfs_bmbt_irec *);

fs/xfs/xfs_pnfs.c

@@ -274,7 +274,7 @@ xfs_fs_commit_blocks(
                                 (end - 1) >> PAGE_SHIFT);
                 WARN_ON_ONCE(error);
 
-                error = xfs_iomap_write_unwritten(ip, start, length);
+                error = xfs_iomap_write_unwritten(ip, start, length, false);
                 if (error)
                         goto out_drop_iolock;
         }

fs/xfs/xfs_super.c

@@ -1654,6 +1654,16 @@ xfs_fs_fill_super(
                         "DAX and reflink have not been tested together!");
         }
 
+        if (mp->m_flags & XFS_MOUNT_DISCARD) {
+                struct request_queue *q = bdev_get_queue(sb->s_bdev);
+
+                if (!blk_queue_discard(q)) {
+                        xfs_warn(mp, "mounting with \"discard\" option, but "
+                                        "the device does not support discard");
+                        mp->m_flags &= ~XFS_MOUNT_DISCARD;
+                }
+        }
+
         if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                 if (mp->m_sb.sb_rblocks) {
                         xfs_alert(mp,

include/rdma/ib_verbs.h

@@ -285,7 +285,7 @@ enum ib_tm_cap_flags {
         IB_TM_CAP_RC        = 1 << 0,
 };
 
-struct ib_xrq_caps {
+struct ib_tm_caps {
         /* Max size of RNDV header */
         u32 max_rndv_hdr_size;
         /* Max number of entries in tag matching list */

@@ -358,7 +358,7 @@ struct ib_device_attr {
         struct ib_rss_caps      rss_caps;
         u32                     max_wq_type_rq;
         u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
-        struct ib_xrq_caps      xrq_caps;
+        struct ib_tm_caps       tm_caps;
 };
 
 enum ib_mtu {

@@ -1739,7 +1739,7 @@ struct ib_mr {
         u32                lkey;
         u32                rkey;
         u64                iova;
-        u32                length;
+        u64                length;
         unsigned int       page_size;
         bool               need_inval;
         union {

include/uapi/rdma/ib_user_verbs.h

@@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
         struct ib_uverbs_rss_caps rss_caps;
         __u32  max_wq_type_rq;
         __u32 raw_packet_caps;
-        struct ib_uverbs_tm_caps xrq_caps;
+        struct ib_uverbs_tm_caps tm_caps;
 };
 
 struct ib_uverbs_query_port {

kernel/events/ring_buffer.c

@@ -412,6 +412,19 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
         return NULL;
 }
 
+static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+{
+        if (rb->aux_overwrite)
+                return false;
+
+        if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+                rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
+                return true;
+        }
+
+        return false;
+}
+
 /*
  * Commit the data written by hardware into the ring buffer by adjusting
  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the

@@ -451,10 +464,8 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
         }
 
         rb->user_page->aux_head = rb->aux_head;
-        if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+        if (rb_need_aux_wakeup(rb))
                 wakeup = true;
-                rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
-        }
 
         if (wakeup) {
                 if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)

@@ -484,9 +495,8 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
         rb->aux_head += size;
 
         rb->user_page->aux_head = rb->aux_head;
-        if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+        if (rb_need_aux_wakeup(rb)) {
                 perf_output_wakeup(handle);
-                rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
                 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
         }

kernel/seccomp.c

@@ -473,14 +473,19 @@ static long seccomp_attach_filter(unsigned int flags,
         return 0;
 }
 
+void __get_seccomp_filter(struct seccomp_filter *filter)
+{
+        /* Reference count is bounded by the number of total processes. */
+        refcount_inc(&filter->usage);
+}
+
 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
 void get_seccomp_filter(struct task_struct *tsk)
 {
         struct seccomp_filter *orig = tsk->seccomp.filter;
         if (!orig)
                 return;
-        /* Reference count is bounded by the number of total processes. */
-        refcount_inc(&orig->usage);
+        __get_seccomp_filter(orig);
 }
 
 static inline void seccomp_filter_free(struct seccomp_filter *filter)

@@ -491,10 +496,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
         }
 }
 
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+static void __put_seccomp_filter(struct seccomp_filter *orig)
 {
-        struct seccomp_filter *orig = tsk->seccomp.filter;
         /* Clean up single-reference branches iteratively. */
         while (orig && refcount_dec_and_test(&orig->usage)) {
                 struct seccomp_filter *freeme = orig;

@@ -503,6 +506,12 @@ void put_seccomp_filter(struct task_struct *tsk)
         }
 }
 
+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+void put_seccomp_filter(struct task_struct *tsk)
+{
+        __put_seccomp_filter(tsk->seccomp.filter);
+}
+
 static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
 {
         memset(info, 0, sizeof(*info));

@@ -1025,13 +1034,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
         if (!data)
                 goto out;
 
-        get_seccomp_filter(task);
+        __get_seccomp_filter(filter);
         spin_unlock_irq(&task->sighand->siglock);
 
         if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                 ret = -EFAULT;
 
-        put_seccomp_filter(task);
+        __put_seccomp_filter(filter);
         return ret;
 
 out:

net/bluetooth/Kconfig

@@ -126,14 +126,4 @@ config BT_DEBUGFS
           Provide extensive information about internal Bluetooth states
           in debugfs.
 
-config BT_LEGACY_IOCTL
-        bool "Enable legacy ioctl interfaces"
-        depends on BT && BT_BREDR
-        default y
-        help
-          Enable support for legacy ioctl interfaces.  This is only needed
-          for old and deprecated applications using direct ioctl calls for
-          controller management.  Since Linux 3.4 all configuration and
-          setup is done via mgmt interface and this is no longer needed.
-
 source "drivers/bluetooth/Kconfig"

net/bluetooth/hci_sock.c

@@ -878,7 +878,6 @@ static int hci_sock_release(struct socket *sock)
         return 0;
 }
 
-#ifdef CONFIG_BT_LEGACY_IOCTL
 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 {
         bdaddr_t bdaddr;

@@ -1050,7 +1049,6 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
         release_sock(sk);
         return err;
 }
-#endif
 
 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)

@@ -1971,11 +1969,7 @@ static const struct proto_ops hci_sock_ops = {
         .getname        = hci_sock_getname,
         .sendmsg        = hci_sock_sendmsg,
         .recvmsg        = hci_sock_recvmsg,
-#ifdef CONFIG_BT_LEGACY_IOCTL
         .ioctl          = hci_sock_ioctl,
-#else
-        .ioctl          = sock_no_ioctl,
-#endif
         .poll           = datagram_poll,
         .listen         = sock_no_listen,
         .shutdown       = sock_no_shutdown,

net/sunrpc/xprtrdma/frwr_ops.c

@@ -401,7 +401,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
         if (unlikely(n != mw->mw_nents))
                 goto out_mapmr_err;
 
-        dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
+        dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
                 __func__, frmr, mw->mw_nents, mr->length);
 
         key = (u8)(mr->rkey & 0x000000FF);