From eeddf6a62b57afdae03658f5e391aab2945a5afa Mon Sep 17 00:00:00 2001
From: csdnstudent
Date: Tue, 24 Oct 2023 16:49:00 +0800
Subject: [PATCH] Tue Oct 24 16:49:00 CST 2023 inscode

---
 .inscode       |  6 +++++-
 build/index.js | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/.inscode b/.inscode
index ecc8bdd..e177731 100644
--- a/.inscode
+++ b/.inscode
@@ -1,6 +1,10 @@
 run = "npm i && npm run dev"
+language = "node"
 
 [env]
 PATH = "/root/${PROJECT_DIR}/.config/npm/node_global/bin:/root/${PROJECT_DIR}/node_modules/.bin:${PATH}"
 XDG_CONFIG_HOME = "/root/.config"
-npm_config_prefix = "/root/${PROJECT_DIR}/.config/npm/node_global"
\ No newline at end of file
+npm_config_prefix = "/root/${PROJECT_DIR}/.config/npm/node_global"
+
+[debugger]
+program = "main.js"
diff --git a/build/index.js b/build/index.js
index 0c57de2..8cfbf11 100644
--- a/build/index.js
+++ b/build/index.js
@@ -33,3 +33,51 @@ if (process.env.npm_config_preview || rawArgv.includes('--preview')) {
 } else {
   run(`vue-cli-service build ${args}`)
 }
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+
+class CNN_Transformer(nn.Module):
+
+    def __init__(self, num_classes):
+        super(CNN_Transformer, self).__init__()
+
+        # CNN layers (no pooling, so the spatial size is preserved)
+        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
+        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
+        self.fc1 = nn.Linear(64 * 16 * 16, 128)
+
+        # Transformer layers
+        self.embedding = nn.Linear(128, 512)
+        self.layer_norm = nn.LayerNorm(512)
+        self.self_attn = nn.MultiheadAttention(512, 8, dropout=0.1)
+        self.dropout1 = nn.Dropout(0.1)
+        self.linear1 = nn.Linear(512, 256)
+        self.dropout2 = nn.Dropout(0.5)
+        self.linear2 = nn.Linear(256, 128)
+
+        # Output layer
+        self.out = nn.Linear(128, num_classes)
+
+    def forward(self, x: Tensor) -> Tensor:
+        # CNN feature extraction; fc1's in_features implies a 3x16x16 input
+        x = F.relu(self.conv1(x))
+        x = F.relu(self.conv2(x))
+        x = x.view(x.size(0), -1)  # flatten to (batch, 64 * 16 * 16)
+        x = F.relu(self.fc1(x))
+
+        # Transformer block over a length-1 sequence
+        x = self.embedding(x)
+        x = self.layer_norm(x.unsqueeze(0))  # (1, batch, 512): MultiheadAttention expects (seq, batch, embed)
+        x, _ = self.self_attn(x, x, x)
+        x = self.dropout1(x)
+        x = F.relu(self.linear1(x))
+        x = self.dropout2(x)
+        x = F.relu(self.linear2(x))
+
+        # Classification head
+        x = self.out(x.squeeze(0))  # drop the length-1 sequence dim
+        return x
+
--
GitLab
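
A minimal smoke test for the appended CNN_Transformer module, assuming the class above is in scope; the (batch, 3, 16, 16) input shape is inferred from fc1's in_features (64 * 16 * 16), and num_classes=10 and the batch size of 4 are arbitrary placeholders:

    import torch

    # Build the model and run one forward pass on random data.
    # Input shape (4, 3, 16, 16) is an assumption implied by
    # fc1 = nn.Linear(64 * 16 * 16, 128); num_classes=10 is arbitrary.
    model = CNN_Transformer(num_classes=10)
    dummy = torch.randn(4, 3, 16, 16)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([4, 10])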