Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Chu Peng 楚鹏
minikube
提交
70c228c2
M
minikube
项目概览
Chu Peng 楚鹏
/
minikube
与 Fork 源项目一致
从无法访问的项目Fork
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
minikube
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
前往新版 GitCode,体验更适合开发者的 AI 搜索 >>
提交
70c228c2
编写于
5月 21, 2020
作者:
S
Sharif Elgamal
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
adding postmortem logs for worker nodes in integration tests
上级
9fe9513c
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
72 additions
and
56 deletions
+72
-56
test/integration/helpers.go
test/integration/helpers.go
+61
-45
test/integration/main.go
test/integration/main.go
+6
-0
test/integration/multinode_test.go
test/integration/multinode_test.go
+5
-11
未找到文件。
test/integration/helpers.go
浏览文件 @
70c228c2
...
...
@@ -191,7 +191,7 @@ func CleanupWithLogs(t *testing.T, profile string, cancel context.CancelFunc) {
}
// PostMortemLogs shows logs for debugging a failed cluster
func
PostMortemLogs
(
t
*
testing
.
T
,
profile
string
,
node
...
string
)
{
func
PostMortemLogs
(
t
*
testing
.
T
,
profile
string
,
multinode
...
bool
)
{
if
!
t
.
Failed
()
{
return
}
...
...
@@ -201,57 +201,73 @@ func PostMortemLogs(t *testing.T, profile string, node ...string) {
return
}
m
:=
false
if
len
(
multinode
)
>
0
{
m
=
multinode
[
0
]
}
nodes
:=
[]
string
{
profile
}
if
m
{
nodes
=
append
(
nodes
,
SecondNodeName
,
ThirdNodeName
)
}
t
.
Logf
(
"-----------------------post-mortem--------------------------------"
)
if
DockerDriver
()
{
t
.
Logf
(
"======> post-mortem[%s]: docker inspect <======"
,
t
.
Name
())
rr
,
err
:=
Run
(
t
,
exec
.
Command
(
"docker"
,
"inspect"
,
profile
))
if
err
!=
nil
{
t
.
Logf
(
"failed to get docker inspect: %v"
,
err
)
}
else
{
t
.
Logf
(
"(dbg) %s:
\n
%s"
,
rr
.
Command
(),
rr
.
Output
())
for
_
,
n
:=
range
nodes
{
machine
:=
profile
if
n
!=
profile
{
machine
=
fmt
.
Sprintf
(
"%s-%s"
,
profile
,
n
)
}
if
DockerDriver
()
{
t
.
Logf
(
"======> post-mortem[%s]: docker inspect <======"
,
t
.
Name
())
rr
,
err
:=
Run
(
t
,
exec
.
Command
(
"docker"
,
"inspect"
,
machine
))
if
err
!=
nil
{
t
.
Logf
(
"failed to get docker inspect: %v"
,
err
)
}
else
{
t
.
Logf
(
"(dbg) %s:
\n
%s"
,
rr
.
Command
(),
rr
.
Output
())
}
}
}
st
:=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"Host"
)
if
st
!=
state
.
Running
.
String
()
{
t
.
Logf
(
"%q host is not running, skipping log retrieval (state=%q)"
,
profile
,
st
)
return
}
t
.
Logf
(
"<<< %s FAILED: start of post-mortem logs <<<"
,
t
.
Name
())
t
.
Logf
(
"======> post-mortem[%s]: minikube logs <======"
,
t
.
Name
())
st
:=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"Host"
,
n
)
if
st
!=
state
.
Running
.
String
()
{
t
.
Logf
(
"%q host is not running, skipping log retrieval (state=%q)"
,
profile
,
st
)
return
}
t
.
Logf
(
"<<< %s FAILED: start of post-mortem logs <<<"
,
t
.
Name
())
t
.
Logf
(
"======> post-mortem[%s]: minikube logs <======"
,
t
.
Name
())
rr
,
err
:=
Run
(
t
,
exec
.
Command
(
Target
(),
"-p"
,
profile
,
"logs"
,
"-n"
,
"25"
))
if
err
!=
nil
{
t
.
Logf
(
"failed logs error: %v"
,
err
)
return
}
t
.
Logf
(
"%s logs: %s"
,
t
.
Name
(),
rr
.
Output
())
rr
,
err
:=
Run
(
t
,
exec
.
Command
(
Target
(),
"-p"
,
profile
,
"logs"
,
"-n"
,
"25"
))
if
err
!=
nil
{
t
.
Logf
(
"failed logs error: %v"
,
err
)
return
}
t
.
Logf
(
"%s logs: %s"
,
t
.
Name
(),
rr
.
Output
())
st
=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"APIServer"
)
if
st
!=
state
.
Running
.
String
()
{
t
.
Logf
(
"%q apiserver is not running, skipping kubectl commands (state=%q)"
,
profile
,
st
)
return
}
st
=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"APIServer"
,
n
)
if
st
!=
state
.
Running
.
String
()
{
t
.
Logf
(
"%q apiserver is not running, skipping kubectl commands (state=%q)"
,
profile
,
st
)
return
}
// Get non-running pods. NOTE: This does not yet contain pods which are "running", but not "ready"
rr
,
rerr
:=
Run
(
t
,
exec
.
Command
(
"kubectl"
,
"--context"
,
profile
,
"get"
,
"po"
,
"-o=jsonpath={.items[*].metadata.name}"
,
"-A"
,
"--field-selector=status.phase!=Running"
))
if
rerr
!=
nil
{
t
.
Logf
(
"%s: %v"
,
rr
.
Command
(),
rerr
)
return
}
notRunning
:=
strings
.
Split
(
rr
.
Stdout
.
String
(),
" "
)
t
.
Logf
(
"non-running pods: %s"
,
strings
.
Join
(
notRunning
,
" "
))
// Get non-running pods. NOTE: This does not yet contain pods which are "running", but not "ready"
rr
,
rerr
:=
Run
(
t
,
exec
.
Command
(
"kubectl"
,
"--context"
,
profile
,
"get"
,
"po"
,
"-o=jsonpath={.items[*].metadata.name}"
,
"-A"
,
"--field-selector=status.phase!=Running"
))
if
rerr
!=
nil
{
t
.
Logf
(
"%s: %v"
,
rr
.
Command
(),
rerr
)
return
}
notRunning
:=
strings
.
Split
(
rr
.
Stdout
.
String
(),
" "
)
t
.
Logf
(
"non-running pods: %s"
,
strings
.
Join
(
notRunning
,
" "
))
t
.
Logf
(
"======> post-mortem[%s]: describe non-running pods <======"
,
t
.
Name
())
t
.
Logf
(
"======> post-mortem[%s]: describe non-running pods <======"
,
t
.
Name
())
args
:=
append
([]
string
{
"--context"
,
profile
,
"describe"
,
"pod"
},
notRunning
...
)
rr
,
rerr
=
Run
(
t
,
exec
.
Command
(
"kubectl"
,
args
...
))
if
rerr
!=
nil
{
t
.
Logf
(
"%s: %v"
,
rr
.
Command
(),
rerr
)
return
args
:=
append
([]
string
{
"--context"
,
profile
,
"describe"
,
"pod"
},
notRunning
...
)
rr
,
rerr
=
Run
(
t
,
exec
.
Command
(
"kubectl"
,
args
...
))
if
rerr
!=
nil
{
t
.
Logf
(
"%s: %v"
,
rr
.
Command
(),
rerr
)
return
}
t
.
Logf
(
"(dbg) %s:
\n
%s"
,
rr
.
Command
(),
rr
.
Output
())
}
t
.
Logf
(
"(dbg) %s:
\n
%s"
,
rr
.
Command
(),
rr
.
Output
())
t
.
Logf
(
"<<< %s FAILED: end of post-mortem logs <<<"
,
t
.
Name
())
t
.
Logf
(
"---------------------/post-mortem---------------------------------"
)
...
...
@@ -356,10 +372,10 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
}
// Status returns a minikube component status as a string
func
Status
(
ctx
context
.
Context
,
t
*
testing
.
T
,
path
string
,
profile
string
,
key
string
,
node
...
string
)
string
{
func
Status
(
ctx
context
.
Context
,
t
*
testing
.
T
,
path
string
,
profile
string
,
key
string
,
node
string
)
string
{
t
.
Helper
()
// Reminder of useful keys: "Host", "Kubelet", "APIServer"
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
path
,
"status"
,
fmt
.
Sprintf
(
"--format={{.%s}}"
,
key
),
"-p"
,
profile
))
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
path
,
"status"
,
fmt
.
Sprintf
(
"--format={{.%s}}"
,
key
),
"-p"
,
profile
,
"-n"
,
node
))
if
err
!=
nil
{
t
.
Logf
(
"status error: %v (may be ok)"
,
err
)
}
...
...
@@ -369,7 +385,7 @@ func Status(ctx context.Context, t *testing.T, path string, profile string, key
// showPodLogs logs debug info for pods
func
showPodLogs
(
ctx
context
.
Context
,
t
*
testing
.
T
,
profile
string
,
ns
string
,
names
[]
string
)
{
t
.
Helper
()
st
:=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"APIServer"
)
st
:=
Status
(
context
.
Background
(),
t
,
Target
(),
profile
,
"APIServer"
,
profile
)
if
st
!=
state
.
Running
.
String
()
{
t
.
Logf
(
"%q apiserver is not running, skipping kubectl commands (state=%q)"
,
profile
,
st
)
return
...
...
test/integration/main.go
浏览文件 @
70c228c2
...
...
@@ -40,6 +40,12 @@ var timeOutMultiplier = flag.Float64("timeout-multiplier", 1, "multiply the time
var
binaryPath
=
flag
.
String
(
"binary"
,
"../../out/minikube"
,
"path to minikube binary"
)
var
testdataDir
=
flag
.
String
(
"testdata-dir"
,
"testdata"
,
"the directory relative to test/integration where the testdata lives"
)
// Names for additional cluster nodes are deterministic (they are
// autogenerated via node.Name()), so keep them as constants for easy
// access throughout the integration tests.
const (
	// SecondNodeName is the autogenerated name of a cluster's second node.
	SecondNodeName = "m02"
	// ThirdNodeName is the autogenerated name of a cluster's third node.
	ThirdNodeName = "m03"
)
// TestMain is the test main
func
TestMain
(
m
*
testing
.
M
)
{
flag
.
Parse
()
...
...
test/integration/multinode_test.go
浏览文件 @
70c228c2
...
...
@@ -50,6 +50,7 @@ func TestMultiNode(t *testing.T) {
for
_
,
tc
:=
range
tests
{
tc
:=
tc
t
.
Run
(
tc
.
name
,
func
(
t
*
testing
.
T
)
{
defer
PostMortemLogs
(
t
,
profile
)
tc
.
validator
(
ctx
,
t
,
profile
)
})
}
...
...
@@ -104,11 +105,8 @@ func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile strin
}
func
validateStopRunningNode
(
ctx
context
.
Context
,
t
*
testing
.
T
,
profile
string
)
{
// Names are autogenerated using the node.Name() function
name
:=
"m03"
// Run minikube node stop on that node
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"stop"
,
n
ame
))
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"stop"
,
ThirdNodeN
ame
))
if
err
!=
nil
{
t
.
Errorf
(
"node stop returned an error. args %q: %v"
,
rr
.
Command
(),
err
)
}
...
...
@@ -143,11 +141,8 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
// TODO (#7496): remove skip once restarts work
t
.
Skip
(
"Restarting nodes is broken :("
)
// Grab the stopped node
name
:=
"m03"
// Start the node back up
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"start"
,
n
ame
))
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"start"
,
ThirdNodeN
ame
))
if
err
!=
nil
{
t
.
Errorf
(
"node start returned an error. args %q: %v"
,
rr
.
Command
(),
err
)
}
...
...
@@ -168,10 +163,9 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
}
func
validateDeleteNodeFromMultiNode
(
ctx
context
.
Context
,
t
*
testing
.
T
,
profile
string
)
{
name
:=
"m03"
// Start the node back up
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"delete"
,
n
ame
))
rr
,
err
:=
Run
(
t
,
exec
.
CommandContext
(
ctx
,
Target
(),
"-p"
,
profile
,
"node"
,
"delete"
,
ThirdNodeN
ame
))
if
err
!=
nil
{
t
.
Errorf
(
"node stop returned an error. args %q: %v"
,
rr
.
Command
(),
err
)
}
...
...
@@ -195,7 +189,7 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
if
err
!=
nil
{
t
.
Errorf
(
"failed to run %q : %v"
,
rr
.
Command
(),
err
)
}
if
strings
.
Contains
(
rr
.
Stdout
.
String
(),
fmt
.
Sprintf
(
"%s-%s"
,
profile
,
n
ame
))
{
if
strings
.
Contains
(
rr
.
Stdout
.
String
(),
fmt
.
Sprintf
(
"%s-%s"
,
profile
,
ThirdNodeN
ame
))
{
t
.
Errorf
(
"docker volume was not properly deleted: %s"
,
rr
.
Stdout
.
String
())
}
}
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录