Thu May 11 06:42:00 UTC 2023 inscode

import argparse
import platform
import socket
import psutil
import numpy as np
import matplotlib.pyplot as plt
def main():
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-i', '--input', help='Input file path', required=False)
    parser.add_argument('-o', '--output', help='Output file path', required=False)
    args = parser.parse_args()
    # Program-specific logic goes here
    # ...


# Logistic sigmoid: maps any real z into the open interval (0, 1).
# Defined at module level so that costFunction below can see it.
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
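# Note: for large |z|, np.exp(-z) can overflow float64 and emit a
# RuntimeWarning. A minimal sketch of a safer variant for array inputs
# (sigmoid_stable is illustrative, not part of the original code): split on
# the sign of z so the exponent is never positive in either branch.
def sigmoid_stable(z):
    z = np.asarray(z, dtype=float)
    out = np.empty_like(z)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])            # z < 0 here, so exp() cannot overflow
    out[~pos] = ez / (1.0 + ez)     # sigmoid(z) = e^z / (1 + e^z) for z < 0
    return out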
# Return a string describing the local operating system.
def get_os_info():
    # platform.system() gives the OS name (e.g. 'Linux', 'Windows', 'Darwin')
    os_name = platform.system()
    # platform.release() gives the OS release/version string
    os_version = platform.release()
    # Join the name and version into a single result string
    return f'{os_name} {os_version}'
# Return the local machine's IP address.
def get_ip_address():
    # Resolve the local host name ...
    hostname = socket.gethostname()
    # ... then look up the IP address it maps to
    ip_address = socket.gethostbyname(hostname)
    return ip_address
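# Caveat: socket.gethostbyname(hostname) may return 127.0.0.1 when the host
# name resolves to a loopback entry (common in /etc/hosts). A frequently used
# workaround, sketched here (get_outbound_ip_address is not part of the
# original code): connect a UDP socket toward a routable address, which
# selects the outbound interface without actually sending any packet.
def get_outbound_ip_address():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))  # any routable address works; nothing is sent
        return s.getsockname()[0]
    finally:
        s.close()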
# Collect CPU, memory, and disk information via psutil.
def get_system_info():
    # CPU information
    cpu_count = psutil.cpu_count()         # number of logical cores
    cpu_freq = psutil.cpu_freq().current   # current CPU frequency, MHz
    # Memory information
    mem = psutil.virtual_memory()
    mem_total = mem.total // 1024 // 1024  # total memory, MB
    mem_used = mem.used // 1024 // 1024    # used memory, MB
    # Disk information, one entry per mounted partition
    disk_partitions = psutil.disk_partitions()
    disk_usage = []
    for partition in disk_partitions:
        partition_usage = psutil.disk_usage(partition.mountpoint)
        disk_usage.append(
            {"mountpoint": partition.mountpoint,
             "total_size": partition_usage.total // 1024 // 1024,  # total, MB
             "used_size": partition_usage.used // 1024 // 1024,    # used, MB
             "free_size": partition_usage.free // 1024 // 1024})   # free, MB
    return {"cpu_count": cpu_count,
            "cpu_freq": cpu_freq,
            "mem_total": mem_total,
            "mem_used": mem_used,
            "disk_usage": disk_usage}
if __name__ == '__main__':
    main()
    os_info = f'Current operating system: {get_os_info()}'
    ip_info = f'Current IP address: {get_ip_address()}'
    system_info = f'Current system info: {get_system_info()}'
    print(os_info)
    print(ip_info)
    print(system_info)
# Logistic regression cost J and its gradient for parameters theta.
def costFunction(theta, X, y):
    m = len(y)
    h = sigmoid(np.dot(X, theta))
    J = (-1 / m) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    grad = (1 / m) * np.dot(X.T, (h - y))
    return J, grad
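# If h saturates to exactly 0.0 or 1.0 (possible in float arithmetic for
# large |X @ theta|), np.log above yields -inf. A common guard, sketched here
# (cost_function_clipped is illustrative, not part of the original code),
# clips h away from the boundaries before taking logs:
def cost_function_clipped(theta, X, y, eps=1e-12):
    m = len(y)
    h = np.clip(sigmoid(np.dot(X, theta)), eps, 1 - eps)
    J = (-1 / m) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    grad = (1 / m) * np.dot(X.T, (h - y))
    return J, grad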
# Batch gradient descent: step theta against the gradient for num_iters steps,
# recording the cost at each iteration.
def gradientDescent(X, y, theta, alpha, num_iters):
    J_history = np.zeros(num_iters)
    for i in range(num_iters):
        J_history[i], grad = costFunction(theta, X, y)
        theta = theta - alpha * grad
    return theta, J_history
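# A quick convergence check, sketched here (plot_cost_history is illustrative,
# not part of the original code): for a suitable alpha, the cost recorded in
# J_history should decrease and flatten out.
def plot_cost_history(J_history):
    plt.plot(J_history)
    plt.xlabel('Iteration')
    plt.ylabel('Cost J')
    plt.title('Gradient descent convergence')
    plt.show()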
# Generate sample data
np.random.seed(0)
X = np.random.randn(100, 2)
ones = np.ones((100, 1))
X = np.hstack((ones, X))  # prepend an intercept column of ones
y = np.random.randint(0, 2, size=(100, 1))

# Initialize theta
initial_theta = np.zeros((X.shape[1], 1))

# Gradient descent
alpha = 0.1
num_iters = 1000
theta, J_history = gradientDescent(X, y, initial_theta, alpha, num_iters)

# Plot the decision boundary: the line where theta0 + theta1*x1 + theta2*x2 = 0
x1 = np.arange(-3, 3, 0.1)
x2 = -(theta[0] + theta[1] * x1) / theta[2]
plt.plot(x1, x2, label='Decision Boundary')
plt.scatter(X[:, 1], X[:, 2], c=y.flatten())
plt.legend()
plt.show()
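# Optional sanity check (a sketch, not part of the original code): training
# accuracy under a 0.5 decision threshold. Since y here is random noise,
# accuracy should hover near chance level (~0.5).
predictions = (sigmoid(np.dot(X, theta)) >= 0.5).astype(int)
print(f'Training accuracy: {np.mean(predictions == y):.2f}')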
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"今天的日期是: 2023-05-11\n"
]
}
],
"source": [
"import datetime\n",
"\n",
"today = datetime.date.today()\n",
"print(\"今天的日期是: \", today)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "ModuleNotFoundError",
"evalue": "No module named 'numpy'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mnumpy\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mnp\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mmatplotlib\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mpyplot\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mplt\u001b[39;00m\n\u001b[1;32m 5\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39msigmoid\u001b[39m(z):\n",
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'numpy'"
]
}
],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"\n",
"def sigmoid(z):\n",
" return 1 / (1 + np.exp(-z))\n",
"\n",
"def costFunction(theta, X, y):\n",
" m = len(y)\n",
" J = 0\n",
" grad = np.zeros(theta.shape)\n",
" \n",
" h = sigmoid(np.dot(X, theta))\n",
" J = (-1/m) * np.sum(y*np.log(h) + (1-y)*np.log(1-h))\n",
" grad = (1/m) * np.dot(X.T, (h-y))\n",
" \n",
" return J, grad\n",
"\n",
"def gradientDescent(X, y, theta, alpha, num_iters):\n",
" m = len(y)\n",
" J_history = np.zeros(num_iters)\n",
" \n",
" for i in range(num_iters):\n",
" J_history[i], grad = costFunction(theta, X, y)\n",
" theta = theta - alpha*grad\n",
" \n",
" return theta, J_history\n",
"\n",
"# 生成样本数据\n",
"np.random.seed(0)\n",
"X = np.random.randn(100, 2)\n",
"ones = np.ones((100, 1))\n",
"X = np.hstack((ones, X))\n",
"y = np.random.randint(0, 2, size=(100,1))\n",
"\n",
"# 初始化theta\n",
"initial_theta = np.zeros((X.shape[1], 1))\n",
"\n",
"# 梯度下降\n",
"alpha = 0.1\n",
"num_iters = 1000\n",
"theta, J_history = gradientDescent(X, y, initial_theta, alpha, num_iters)\n",
"\n",
"# 绘制决策边界\n",
"x1 = np.arange(-3, 3, 0.1)\n",
"x2 = -(theta[0]+theta[1]*x1)/theta[2]\n",
"plt.plot(x1, x2, label='Decision Boundary')\n",
"plt.scatter(X[:, 1], X[:, 2], c=y.flatten())\n",
"plt.legend()\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "f9f85f796d01129d0dd105a088854619f454435301f6ffec2fea96ecbd9be4ac"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}