官方直播 / Python-test_278877
Forked from 唯有杜康TM / Python获取主机系统环境信息 (in sync with the fork source)
Commit 7ccc7641
Authored on May 11, 2023 by 622ee496dfef6c4fdb84cccd
Commit message: Thu May 11 06:42:00 UTC 2023 inscode
Parent: 25225d84

Showing 4 changed files with 224 additions and 71 deletions (+224 -71)
main.py            +45  -71
requirements.txt    +1   -0
test.ipynb        +128   -0
test.py            +50   -0
main.py
import argparse
import platform
import socket
import psutil
import numpy as np
import matplotlib.pyplot as plt


def main():
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-i', '--input', help='Input file path', required=False)
    parser.add_argument('-o', '--output', help='Output file path', required=False)
    args = parser.parse_args()
    # Perform the actual processing logic here
    # ...


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


# Helper that returns the local operating system info
def get_os_info():
    # Use the platform module to get the current OS name
    os_name = platform.system()
    # Use the platform module to get the current OS release
    os_version = platform.release()
    # Join the OS name and release into one result string
    return f'{os_name} {os_version}'


# Helper that returns the local IP address
def get_ip_address():
    # Use the socket module to get the local hostname
    hostname = socket.gethostname()
    # Use the socket module to resolve the hostname to an IP address
    ip_address = socket.gethostbyname(hostname)
    # Return the resolved IP address string
    return ip_address


def get_system_info():
    # CPU information
    cpu_count = psutil.cpu_count()  # number of CPU cores
    cpu_freq = psutil.cpu_freq().current  # current CPU frequency
    # Memory information
    mem = psutil.virtual_memory()
    mem_total = mem.total // 1024 // 1024  # total memory, in MB
    mem_used = mem.used // 1024 // 1024  # used memory, in MB
    # Disk information
    disk_partitions = psutil.disk_partitions()
    disk_usage = []
    for partition in disk_partitions:
        partition_usage = psutil.disk_usage(partition.mountpoint)
        disk_usage.append({
            "mountpoint": partition.mountpoint,
            "total_size": partition_usage.total // 1024 // 1024,  # total size, in MB
            "used_size": partition_usage.used // 1024 // 1024,  # used size, in MB
            "free_size": partition_usage.free // 1024 // 1024  # free size, in MB
        })
    return {
        "cpu_count": cpu_count,
        "cpu_freq": cpu_freq,
        "mem_total": mem_total,
        "mem_used": mem_used,
        "disk_usage": disk_usage
    }


if __name__ == '__main__':
    main()
    os_info = f'Current OS: {get_os_info()}'
    ip_info = f'Current IP address: {get_ip_address()}'
    system_info = f'Current system info: {get_system_info()}'
    print(os_info)
    print(ip_info)
    print(system_info)


def costFunction(theta, X, y):
    m = len(y)
    J = 0
    grad = np.zeros(theta.shape)
    h = sigmoid(np.dot(X, theta))
    J = (-1 / m) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    grad = (1 / m) * np.dot(X.T, (h - y))
    return J, grad


def gradientDescent(X, y, theta, alpha, num_iters):
    m = len(y)
    J_history = np.zeros(num_iters)
    for i in range(num_iters):
        J_history[i], grad = costFunction(theta, X, y)
        theta = theta - alpha * grad
    return theta, J_history


# Generate sample data
np.random.seed(0)
X = np.random.randn(100, 2)
ones = np.ones((100, 1))
X = np.hstack((ones, X))
y = np.random.randint(0, 2, size=(100, 1))

# Initialize theta
initial_theta = np.zeros((X.shape[1], 1))

# Gradient descent
alpha = 0.1
num_iters = 1000
theta, J_history = gradientDescent(X, y, initial_theta, alpha, num_iters)

# Plot the decision boundary
x1 = np.arange(-3, 3, 0.1)
x2 = -(theta[0] + theta[1] * x1) / theta[2]
plt.plot(x1, x2, label='Decision Boundary')
plt.scatter(X[:, 1], X[:, 2], c=y.flatten())
plt.legend()
plt.show()
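The system-information helpers above are thin wrappers over a few platform, socket, and psutil calls. For readers who want to try those lookups in isolation without importing main.py (which would also run the module-level gradient-descent demo), a minimal sketch follows; the values in the comments are illustrative only.

# Standalone sketch of the same lookups used by get_os_info, get_ip_address,
# and get_system_info; example values in the comments are illustrative.
import platform
import socket
import psutil

print(platform.system(), platform.release())            # e.g. "Linux 5.15.0"
print(socket.gethostbyname(socket.gethostname()))       # e.g. "127.0.1.1"
print(psutil.cpu_count())                                # number of logical CPU cores
print(psutil.virtual_memory().total // 1024 // 1024)     # total RAM in MB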
requirements.txt
numpy
\ No newline at end of file
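The committed requirements.txt lists only numpy, while main.py and test.py also import psutil and matplotlib. If those are meant to be installable dependencies as well, a fuller requirements file might look like the following sketch (leaving versions unpinned is an assumption):

numpy
psutil
matplotlib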
test.ipynb (new file, mode 100644)
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"今天的日期是: 2023-05-11\n"
]
}
],
"source": [
"import datetime\n",
"\n",
"today = datetime.date.today()\n",
"print(\"今天的日期是: \", today)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "ModuleNotFoundError",
"evalue": "No module named 'numpy'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mnumpy\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mnp\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mmatplotlib\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mpyplot\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mplt\u001b[39;00m\n\u001b[1;32m 5\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39msigmoid\u001b[39m(z):\n",
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'numpy'"
]
}
],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"\n",
"def sigmoid(z):\n",
" return 1 / (1 + np.exp(-z))\n",
"\n",
"def costFunction(theta, X, y):\n",
" m = len(y)\n",
" J = 0\n",
" grad = np.zeros(theta.shape)\n",
" \n",
" h = sigmoid(np.dot(X, theta))\n",
" J = (-1/m) * np.sum(y*np.log(h) + (1-y)*np.log(1-h))\n",
" grad = (1/m) * np.dot(X.T, (h-y))\n",
" \n",
" return J, grad\n",
"\n",
"def gradientDescent(X, y, theta, alpha, num_iters):\n",
" m = len(y)\n",
" J_history = np.zeros(num_iters)\n",
" \n",
" for i in range(num_iters):\n",
" J_history[i], grad = costFunction(theta, X, y)\n",
" theta = theta - alpha*grad\n",
" \n",
" return theta, J_history\n",
"\n",
"# 生成样本数据\n",
"np.random.seed(0)\n",
"X = np.random.randn(100, 2)\n",
"ones = np.ones((100, 1))\n",
"X = np.hstack((ones, X))\n",
"y = np.random.randint(0, 2, size=(100,1))\n",
"\n",
"# 初始化theta\n",
"initial_theta = np.zeros((X.shape[1], 1))\n",
"\n",
"# 梯度下降\n",
"alpha = 0.1\n",
"num_iters = 1000\n",
"theta, J_history = gradientDescent(X, y, initial_theta, alpha, num_iters)\n",
"\n",
"# 绘制决策边界\n",
"x1 = np.arange(-3, 3, 0.1)\n",
"x2 = -(theta[0]+theta[1]*x1)/theta[2]\n",
"plt.plot(x1, x2, label='Decision Boundary')\n",
"plt.scatter(X[:, 1], X[:, 2], c=y.flatten())\n",
"plt.legend()\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
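The traceback captured in the notebook's second cell shows that the kernel cannot import numpy. A minimal fix, assuming a Jupyter/IPython kernel, is to install the dependencies into that kernel's environment and re-run the cell:

# Run in a notebook cell; %pip installs into the environment of the active kernel.
%pip install numpy matplotlib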
test.py (new file, mode 100644)
import numpy as np
import matplotlib.pyplot as plt


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


def costFunction(theta, X, y):
    m = len(y)
    J = 0
    grad = np.zeros(theta.shape)
    h = sigmoid(np.dot(X, theta))
    J = (-1 / m) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    grad = (1 / m) * np.dot(X.T, (h - y))
    return J, grad


def gradientDescent(X, y, theta, alpha, num_iters):
    m = len(y)
    J_history = np.zeros(num_iters)
    for i in range(num_iters):
        J_history[i], grad = costFunction(theta, X, y)
        theta = theta - alpha * grad
    return theta, J_history


# Generate sample data
np.random.seed(0)
X = np.random.randn(100, 2)
ones = np.ones((100, 1))
X = np.hstack((ones, X))
y = np.random.randint(0, 2, size=(100, 1))

# Initialize theta
initial_theta = np.zeros((X.shape[1], 1))

# Gradient descent
alpha = 0.1
num_iters = 1000
theta, J_history = gradientDescent(X, y, initial_theta, alpha, num_iters)

# Plot the decision boundary
x1 = np.arange(-3, 3, 0.1)
x2 = -(theta[0] + theta[1] * x1) / theta[2]
plt.plot(x1, x2, label='Decision Boundary')
plt.scatter(X[:, 1], X[:, 2], c=y.flatten())
plt.legend()
plt.show()
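gradientDescent returns J_history alongside theta, but test.py never inspects it. A small optional sketch of a convergence check that could be appended after the plotting code; the cost.png filename is illustrative, not part of the committed script.

# Optional convergence check: plot the cost recorded at each iteration.
plt.figure()
plt.plot(range(num_iters), J_history)
plt.xlabel('Iteration')
plt.ylabel('Cost J')
plt.savefig('cost.png')  # illustrative output path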