Commit 8e7948b2 authored by root

Tue Jul 1 18:12:01 CST 2025 inscode

Parent 0a575cc3
run = "pip install -r requirements.txt;python main.py" run = "cd deeppolar-main && python main.py --test --N 256 --K 37 --kernel_size 16 --test_snr_start -5 --test_snr_end 5 --snr_points 5"
language = "python" is_gui = false
is_resident = true
[packager] is_html = false
AUTO_PIP = true
[env]
VIRTUAL_ENV = "/root/${PROJECT_DIR}/venv"
PATH = "${VIRTUAL_ENV}/bin:${PATH}"
PYTHONPATH = "$PYTHONHOME/lib/python3.10:${VIRTUAL_ENV}/lib/python3.10/site-packages"
REPLIT_POETRY_PYPI_REPOSITORY = "http://mirrors.csdn.net.cn/repository/csdn-pypi-mirrors/simple"
MPLBACKEND = "TkAgg"
POETRY_CACHE_DIR = "/root/${PROJECT_DIR}/.cache/pypoetry"
[debugger]
program = "main.py"
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
MIT License
Copyright (c) 2024 Ashwin Hebbar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# DeepPolar codes
Code for "[DeepPolar: Inventing Nonlinear Large-Kernel Polar Codes via Deep Learning](https://arxiv.org/abs/2402.08864)", ICML 2024
## Installation
First, clone the repository to your local machine:
```bash
git clone https://github.com/hebbarashwin/deeppolar.git
cd deeppolar
```
Then, install the required Python packages:
```bash
pip install -r requirements.txt
```
## Usage
Best results are obtained by pretraining the kernels with curriculum training and initializing the network from these pretrained kernels (training from scratch may also work).
An example kernel is provided. Command to run (you can set `--id` to distinguish different runs):
```bash
python -u main.py --N 256 --K 37 -ell 16 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 2000 --enc_train_snr 0 --dec_train_snr -2 --enc_hidden_size 64 --dec_hidden_size 128 --enc_lr 0.0001 --dec_lr 0.0001 --weight_decay 0 --test_snr_start -5 --test_snr_end -1 --snr_points 5 --batch_size 20000 --id run1 --kernel_load_path Polar_Results/curriculum/final_kernels/16_normal_polar_eh64_dh128_selu --gpu -2
```
- `N`, `K`: Code parameters
- `-ell`: Kernel size; $\sqrt{N}$ works best.
- `kernel_load_path`: Path to load pretrained model kernels (omit this flag when training from scratch).
- `enc_train_iters`, `dec_train_iters`: Number of training iterations for the encoder and decoder.
- `full_iters`: Total iterations for full training cycles.
- `id`: Identifier for the run.
- `model_save_per`: Frequency of saving the trained models.
- `gpu`: `-2` selects any available CUDA device, `-1` forces CPU, and `0`/`1`/`2`/`3` selects a specific GPU (see the sketch below).
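As a rough illustration, the `--gpu` convention above could map to a `torch.device` as in the following sketch (the actual selection logic lives in `main.py` and may differ):

```python
import torch

def select_device(gpu: int) -> torch.device:
    # --gpu -2 : any CUDA device, -1 : CPU, 0/1/2/3 : a specific GPU index
    if gpu == -1 or not torch.cuda.is_available():
        return torch.device("cpu")
    if gpu == -2:
        return torch.device("cuda")
    return torch.device(f"cuda:{gpu}")
```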
The kernels can be pretrained, for example, by running:
```bash
bash pretrain.sh
```
(Each kernel typically does not need to be trained for as many iterations as this script uses.)
### Testing
```bash
python -u main.py --N 256 --K 37 -ell 16 --enc_hidden_size 64 --dec_hidden_size 128 --test_snr_start -5 --test_snr_end -1 --snr_points 5 --test_batch_size 10000 --id run1 --weight_decay 0. --num_errors 100 --test
```
(More details will be added soon.)
- Finetuning with increasingly large batch sizes improves high-SNR performance.
- BER gains can be traded for improved BLER by finetuning with a BLER surrogate loss (see the sketch below).
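For illustration, one possible BLER surrogate (a sketch only, not necessarily the loss used in this repository) penalizes the worst bit in each block, since a block is in error as soon as any single bit is wrong:

```python
import torch
import torch.nn.functional as F

def soft_bler_surrogate(llrs: torch.Tensor, msg_bits: torch.Tensor) -> torch.Tensor:
    # llrs: (batch, K) decoder outputs; msg_bits: (batch, K) in {-1, +1}.
    targets = 0.5 * msg_bits + 0.5                              # map {-1, +1} -> {0, 1}
    per_bit = F.binary_cross_entropy_with_logits(llrs, targets, reduction="none")
    # A smooth proxy for "any bit wrong": take the worst per-bit loss in each block.
    return per_bit.max(dim=1).values.mean()
```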
deeppolar-main/figures/256_37_improved_bler.pdf (binary file; diff collapsed)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time
from math import sqrt
# Tensor-Train (TT) factorized linear layer
class TTLinear(nn.Module):
def __init__(self, in_features, out_features, ranks, activation=None, bias=True):
super(TTLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ranks = ranks
self.activation = activation
# Factorize input and output dimensions
self.in_shape, self.out_shape = self.factorize_dims(in_features, out_features)
# Create TT cores
self.cores = nn.ParameterList()
for i in range(len(self.in_shape)):
if i == 0:
input_rank = 1
else:
input_rank = ranks[i-1]
if i == len(self.in_shape)-1:
output_rank = 1
else:
output_rank = ranks[i]
core = nn.Parameter(torch.randn(
input_rank, self.in_shape[i], self.out_shape[i], output_rank
) * sqrt(2.0 / (input_rank * self.in_shape[i])))
self.cores.append(core)
if bias:
self.bias = nn.Parameter(torch.zeros(out_features))
else:
self.register_parameter('bias', None)
    def factorize_dims(self, in_dim, out_dim):
        # Split each dimension into two roughly balanced factors, so that
        # in_dim = i1 * i2 and out_dim = o1 * o2 and the TT has two cores.
        def two_factors(d):
            for i in range(int(sqrt(d)), 0, -1):
                if d % i == 0:
                    return [i, d // i]
            return [1, d]
        in_shape = two_factors(in_dim)
        out_shape = two_factors(out_dim)
        return in_shape, out_shape
    def forward(self, x):
        # Accept (..., in_features) input like nn.Linear; flatten leading dims.
        lead_shape = x.shape[:-1]
        batch_size = x.numel() // self.in_features
        # Reshape input to (batch, r0 = 1, i_1, remaining input modes)
        res = x.reshape(batch_size, 1, self.in_shape[0], -1)
        # Contract one TT core at a time; finished output modes are folded into
        # the batch dimension, the rank dimension is carried to the next core.
        for k, core in enumerate(self.cores):
            # res: (B, r_in, i_k, rest), core: (r_in, i_k, o_k, r_out)
            res = torch.einsum('brim,riod->bodm', res, core)
            B, o_k, r_out, rest = res.shape
            if k + 1 < len(self.cores):
                i_next = self.in_shape[k + 1]
                res = res.reshape(B * o_k, r_out, i_next, rest // i_next)
            else:
                res = res.reshape(B * o_k, r_out * rest)
        # Reshape to output dimension
        res = res.reshape(*lead_shape, self.out_features)
        if self.bias is not None:
            res = res + self.bias
        if self.activation is not None:
            res = self.activation(res)
        return res
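# Minimal usage sketch (hypothetical sizes): a TT-factorized stand-in for a dense
# 64 -> 128 layer. With small ranks the TT cores hold far fewer parameters than
# the dense weight matrix.
#   tt = TTLinear(in_features=64, out_features=128, ranks=[8, 8], activation=F.selu)
#   y = tt(torch.randn(32, 64))                       # -> shape (32, 128)
#   n_tt = sum(core.numel() for core in tt.cores)     # vs. 64 * 128 dense weights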
# Activation-function lookup used by the kernel networks
def get_activation_fn(activation):
if activation == 'tanh':
return F.tanh
elif activation == 'elu':
return F.elu
elif activation == 'relu':
return F.relu
elif activation == 'selu':
return F.selu
elif activation == 'sigmoid':
return F.sigmoid
elif activation == 'gelu':
return F.gelu
elif activation == 'silu':
return F.silu
elif activation == 'mish':
return F.mish
elif activation == 'linear':
return nn.Identity()
else:
raise NotImplementedError(f'Activation function {activation} not implemented')
# g_Full: encoder kernel network (optionally built from TT-factorized linear layers)
class g_Full(nn.Module):
def __init__(self, input_size, hidden_size, output_size, depth=3, skip_depth=1,
skip_layer=1, ell=2, activation='selu', use_skip=False, augment=False,
use_tt=False, tt_ranks=None):
super(g_Full, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.depth = depth
self.ell = ell
self.ell_input_size = input_size//self.ell
self.augment = augment
self.activation_fn = get_activation_fn(activation)
self.skip_depth = skip_depth
self.skip_layer = skip_layer
self.use_skip = use_skip
self.use_tt = use_tt
self.tt_ranks = tt_ranks if tt_ranks is not None else [8, 8] # Default ranks
if self.use_skip:
if self.use_tt:
self.skip = nn.ModuleList([TTLinear(self.input_size + self.output_size,
self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn)])
self.skip.extend([TTLinear(self.hidden_size, self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn)
for ii in range(1, self.skip_depth)])
else:
self.skip = nn.ModuleList([nn.Linear(self.input_size + self.output_size,
self.hidden_size, bias=True)])
self.skip.extend([nn.Linear(self.hidden_size, self.hidden_size, bias=True)
for ii in range(1, self.skip_depth)])
if self.use_tt:
self.linears = nn.ModuleList([TTLinear(self.input_size, self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn)])
self.linears.extend([TTLinear(self.hidden_size, self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn)
for ii in range(1, self.depth)])
self.linears.append(TTLinear(self.hidden_size, self.output_size,
ranks=self.tt_ranks))
else:
self.linears = nn.ModuleList([nn.Linear(self.input_size, self.hidden_size, bias=True)])
self.linears.extend([nn.Linear(self.hidden_size, self.hidden_size, bias=True)
for ii in range(1, self.depth)])
self.linears.append(nn.Linear(self.hidden_size, self.output_size, bias=True))
@staticmethod
def get_augment(msg, ell):
u = msg.clone()
n = int(np.log2(ell))
for d in range(0, n):
num_bits = 2**d
for i in np.arange(0, ell, 2*num_bits):
# [u v] encoded to [u xor(u,v)]
if len(u.shape) == 2:
u = torch.cat((u[:, :i], u[:, i:i+num_bits].clone() * u[:, i+num_bits: i+2*num_bits], u[:, i+num_bits:]), dim=1)
elif len(u.shape) == 3:
u = torch.cat((u[:, :, :i], u[:, :, i:i+num_bits].clone() * u[:, :, i+num_bits: i+2*num_bits], u[:, :, i+num_bits:]), dim=2)
# u[:, i:i+num_bits] = u[:, i:i+num_bits].clone() * u[:, i+num_bits: i+2*num_bits].clone
if len(u.shape) == 3:
return u[:, :, :-1]
elif len(u.shape) == 2:
return u[:, :-1]
def forward(self, y):
x = y.clone()
for ii, layer in enumerate(self.linears):
if ii != self.depth:
x = self.activation_fn(layer(x))
if self.use_skip and ii == self.skip_layer:
if len(x.shape) == 3:
skip_input = torch.cat([y, g_Full.get_augment(y, self.ell)], dim = 2)
elif len(x.shape) == 2:
skip_input = torch.cat([y, g_Full.get_augment(y, self.ell)], dim = 1)
for jj, skip_layer in enumerate(self.skip):
skip_input = self.activation_fn(skip_layer(skip_input))
x = x + skip_input
else:
x = layer(x)
if self.augment:
x = x + g_Full.get_augment(y, self.ell)
return x
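# Minimal construction sketch (hypothetical sizes): an ell = 16 kernel encoder
# network mapping 16 message bits to 16 real-valued code symbols, matching the
# --enc_hidden_size 64 setting from the README.
#   gnet = g_Full(input_size=16, hidden_size=64, output_size=16, ell=16, activation='selu')
#   msg = 1 - 2 * (torch.rand(8, 16) > 0.5).float()   # +/-1 message bits
#   code = gnet(msg)                                   # -> shape (8, 16)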
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.01)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(0.0, 0.01)
m.bias.data.fill_(0)
    elif classname.find('Linear') != -1 and hasattr(m, 'weight'):
        m.weight.data.normal_(0.0, 0.01)
        if getattr(m, 'bias', None) is not None:
            m.bias.data.fill_(0.)
# f_Full: decoder kernel network (optionally built from TT-factorized linear layers)
class f_Full(nn.Module):
def __init__(self, input_size, hidden_size, output_size, dropout_p=0.,
activation='selu', depth=3, use_norm=False, use_tt=False, tt_ranks=None):
super(f_Full, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.depth = depth
self.use_norm = use_norm
self.activation_fn = get_activation_fn(activation)
self.use_tt = use_tt
self.tt_ranks = tt_ranks if tt_ranks is not None else [8, 8] # Default ranks
if self.use_tt:
self.linears = nn.ModuleList([TTLinear(self.input_size, self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn)])
for ii in range(1, self.depth):
self.linears.append(TTLinear(self.hidden_size, self.hidden_size,
ranks=self.tt_ranks,
activation=self.activation_fn))
self.linears.append(TTLinear(self.hidden_size, self.output_size,
ranks=self.tt_ranks))
else:
self.linears = nn.ModuleList([nn.Linear(self.input_size, self.hidden_size, bias=True)])
for ii in range(1, self.depth):
self.linears.append(nn.Linear(self.hidden_size, self.hidden_size, bias=True))
self.linears.append(nn.Linear(self.hidden_size, self.output_size, bias=True))
if self.use_norm:
self.norms = nn.ModuleList([nn.LayerNorm(self.hidden_size)
for _ in range(self.depth)])
def forward(self, y, aug = None):
x = y.clone()
for ii, layer in enumerate(self.linears):
if ii != self.depth:
x = layer(x)
                if getattr(self, 'use_norm', False):
                    x = self.norms[ii](x)
                x = self.activation_fn(x)
else:
x = layer(x)
return x
def get_onehot(actions):
inds = (0.5 + 0.5*actions).long()
return torch.eye(2, device = inds.device)[inds].reshape(actions.shape[0], -1)
(diff collapsed)
python -u main.py --N 16 --K 1 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 500 --batch_size 20000 --enc_train_snr -1 --dec_train_snr -3 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -7 --test_snr_end 0 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_1_normal_polar_eh64_dh128_selu_new --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 2 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 500 --batch_size 20000 --enc_train_snr 2 --dec_train_snr 0 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -4 --test_snr_end 3 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_2_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_1_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 3 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 3 --dec_train_snr 1 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -3 --test_snr_end 4 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_3_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_2_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 4 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 3 --dec_train_snr 2 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -3 --test_snr_end 4 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_4_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_3_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 5 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 4 --dec_train_snr 2.5 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -2 --test_snr_end 5 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_5_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_4_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 6 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 5 --dec_train_snr 4 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start -1 --test_snr_end 6 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_6_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_5_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 7 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 6 --dec_train_snr 4 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 0 --test_snr_end 7 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_7_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_6_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 8 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 6 --dec_train_snr 5 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 0 --test_snr_end 7 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_8_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_7_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 9 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 6.5 --dec_train_snr 5 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 1 --test_snr_end 8 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_9_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_8_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 10 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 7 --dec_train_snr 6 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 1 --test_snr_end 8 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_10_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_9_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 11 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 7 --dec_train_snr 6 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 1 --test_snr_end 8 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_11_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_10_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 12 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 8.5 --dec_train_snr 7 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 3 --test_snr_end 10 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_12_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_11_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 13 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 9 --dec_train_snr 8 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 3 --test_snr_end 10 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_13_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_12_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 14 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 9.5 --dec_train_snr 8 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 4 --test_snr_end 11 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_14_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_13_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 15 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 10 --dec_train_snr 9 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 4 --test_snr_end 11 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_15_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_14_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
python -u main.py --N 16 --K 16 --model_save_per 100 --enc_train_iters 20 --dec_train_iters 200 --full_iters 1000 --batch_size 20000 --enc_train_snr 12 --dec_train_snr 11 --enc_lr 0.0003 --dec_lr 0.0003 --num_errors 10 --test_snr_start 6 --test_snr_end 13 --snr_points 8 -ell 16 --encoder_type KO --enc_activation selu --dec_activation selu --dec_hidden_size 128 --enc_hidden_size 64 --save_path Polar_Results/curriculum/16_16_normal_polar_eh64_dh128_selu_new --load_path Polar_Results/curriculum/16_15_normal_polar_eh64_dh128_selu_new/Models/fnet_gnet_final.pt --gpu 0 --regularizer polar --regularizer_weight 0.05
bash copy_files.sh 16 normal_polar_eh64_dh128_selu_new
#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile requirements.in
#
argparse==1.4.0
# via -r requirements.in
contourpy==1.2.1
# via matplotlib
cycler==0.12.1
# via matplotlib
filelock==3.14.0
# via
# torch
# triton
fonttools==4.51.0
# via matplotlib
fsspec==2024.3.1
# via torch
jinja2==3.1.4
# via torch
kiwisolver==1.4.5
# via matplotlib
markupsafe==2.1.5
# via jinja2
matplotlib==3.8.4
# via -r requirements.in
mpmath==1.3.0
# via sympy
networkx==3.3
# via torch
numpy==1.26.4
# via
# -r requirements.in
# contourpy
# matplotlib
nvidia-cublas-cu12==12.1.3.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.1.105
# via torch
nvidia-cuda-nvrtc-cu12==12.1.105
# via torch
nvidia-cuda-runtime-cu12==12.1.105
# via torch
nvidia-cudnn-cu12==8.9.2.26
# via torch
nvidia-cufft-cu12==11.0.2.54
# via torch
nvidia-curand-cu12==10.3.2.106
# via torch
nvidia-cusolver-cu12==11.4.5.107
# via torch
nvidia-cusparse-cu12==12.1.0.106
# via
# nvidia-cusolver-cu12
# torch
nvidia-nccl-cu12==2.20.5
# via torch
nvidia-nvjitlink-cu12==12.4.127
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.1.105
# via torch
packaging==24.0
# via matplotlib
pillow==10.3.0
# via matplotlib
pyparsing==3.1.2
# via matplotlib
python-dateutil==2.9.0.post0
# via matplotlib
six==1.16.0
# via python-dateutil
sympy==1.12
# via torch
torch==2.3.0
# via -r requirements.in
tqdm==4.66.4
# via -r requirements.in
triton==2.3.0
# via torch
typing-extensions==4.11.0
# via torch
import torch
import torch.nn.functional as F
import numpy as np
from utils import errors_ber, errors_bler, dec2bitarray, snr_db2sigma
import time
def train(args, polar, optimizer, scheduler, batch_size, train_snr, train_iters, criterion, device, info_positions, binary = False, noise_type = 'awgn'):
if args.N == polar.ell:
assert len(info_positions) == args.K
kernel = True
else:
kernel = False
for iter in range(train_iters):
if batch_size > args.small_batch_size:
small_batch_size = args.small_batch_size
else:
small_batch_size = batch_size
num_batches = batch_size // small_batch_size
for ii in range(num_batches):
msg_bits = 1 - 2*(torch.rand(small_batch_size, args.K) > 0.5).float().to(device)
if args.encoder_type == 'polar':
codes = polar.encode_plotkin(msg_bits)
elif 'KO' in args.encoder_type:
if kernel:
codes = polar.kernel_encode(args.kernel_size, polar.gnet_dict[1][0], msg_bits, info_positions, binary = binary)
else:
codes = polar.deeppolar_encode(msg_bits, binary = binary)
noisy_codes = polar.channel(codes, train_snr, noise_type)
if 'KO' in args.decoder_type:
if kernel:
if args.decoder_type == 'KO_parallel':
decoded_llrs, decoded_bits = polar.kernel_parallel_decode(args.kernel_size, polar.fnet_dict[1][0], noisy_codes, info_positions)
else:
decoded_llrs, decoded_bits = polar.kernel_decode(args.kernel_size, polar.fnet_dict[1][0], noisy_codes, info_positions)
else:
decoded_llrs, decoded_bits = polar.deeppolar_decode(noisy_codes)
elif args.decoder_type == 'SC':
decoded_llrs, decoded_bits = polar.sc_decode_new(noisy_codes, train_snr)
if 'BCE' in args.loss or args.loss == 'focal':
loss = criterion(decoded_llrs, 0.5 * msg_bits.to(polar.device) + 0.5)
else:
loss = criterion(torch.tanh(0.5*decoded_llrs), msg_bits.to(polar.device))
if args.regularizer == 'std':
if args.K == 1:
loss += args.regularizer_weight * torch.std(codes, dim=1).mean()
elif args.K == 2:
loss += args.regularizer_weight * (0.5*torch.std(codes[:, ::2], dim=1).mean() + .5*torch.std(codes[:, 1::2], dim=1).mean())
elif args.regularizer == 'max_deviation':
if args.K == 1:
loss += args.regularizer_weight * torch.amax(torch.abs(codes - codes.mean(dim=1, keepdim=True)), dim=1).mean()
elif args.K == 2:
loss += args.regularizer_weight * (0.5*torch.amax(torch.abs(codes[:, ::2] - codes[:, ::2].mean(dim=1, keepdim=True)), dim=1).mean() + .5*torch.amax(torch.abs(codes[:, 1::2] - codes[:, 1::2].mean(dim=1, keepdim=True)), dim=1).mean())
elif args.regularizer == 'polar':
loss += args.regularizer_weight * F.mse_loss(codes, polar.encode_plotkin(msg_bits))
loss = loss/num_batches
loss.backward()
optimizer.step()
if scheduler is not None:
scheduler.step()
optimizer.zero_grad()
train_ber = errors_ber(decoded_bits.sign(), msg_bits.to(polar.device)).item()
return loss.item(), train_ber
def deeppolar_full_test(args, polar, KO, snr_range, device, info_positions, binary=False, num_errors=100, noise_type = 'awgn'):
bers_KO_test = [0. for _ in snr_range]
blers_KO_test = [0. for _ in snr_range]
bers_SC_test = [0. for _ in snr_range]
blers_SC_test = [0. for _ in snr_range]
kernel = args.N == KO.ell
print(f"TESTING until {num_errors} block errors")
for snr_ind, snr in enumerate(snr_range):
total_block_errors_SC = 0
total_block_errors_KO = 0
batches_processed = 0
sigma = snr_db2sigma(snr) # Assuming SNR is given in dB and noise variance is derived from it
try:
while min(total_block_errors_SC, total_block_errors_KO) <= num_errors:
msg_bits = 2 * (torch.rand(args.test_batch_size, args.K) < 0.5).float() - 1
msg_bits = msg_bits.to(device)
polar_code = polar.encode_plotkin(msg_bits)
if 'KO' in args.encoder_type:
if kernel:
KO_polar_code = KO.kernel_encode(args.kernel_size, KO.gnet_dict[1][0], msg_bits, info_positions, binary=binary)
else:
KO_polar_code = KO.deeppolar_encode(msg_bits, binary=binary)
noisy_code = polar.channel(polar_code, snr, noise_type)
noise = noisy_code - polar_code
noisy_KO_code = KO_polar_code + noise if 'KO' in args.encoder_type else noisy_code
SC_llrs, decoded_SC_msg_bits = polar.sc_decode_new(noisy_code, snr)
ber_SC = errors_ber(msg_bits, decoded_SC_msg_bits.sign()).item()
bler_SC = errors_bler(msg_bits, decoded_SC_msg_bits.sign()).item()
total_block_errors_SC += int(bler_SC*args.test_batch_size)
if 'KO' in args.decoder_type:
if kernel:
if args.decoder_type == 'KO_parallel':
KO_llrs, decoded_KO_msg_bits = KO.kernel_parallel_decode(args.kernel_size, KO.fnet_dict[1][0], noisy_KO_code, info_positions)
else:
KO_llrs, decoded_KO_msg_bits = KO.kernel_decode(args.kernel_size, KO.fnet_dict[1][0], noisy_KO_code, info_positions)
else:
KO_llrs, decoded_KO_msg_bits = KO.deeppolar_decode(noisy_KO_code)
else: # if SC is also used for KO
KO_llrs, decoded_KO_msg_bits = KO.sc_decode_new(noisy_KO_code, snr)
ber_KO = errors_ber(msg_bits, decoded_KO_msg_bits.sign()).item()
bler_KO = errors_bler(msg_bits, decoded_KO_msg_bits.sign()).item()
total_block_errors_KO += int(bler_KO*args.test_batch_size)
batches_processed += 1
# Update accumulative results for logging
bers_KO_test[snr_ind] += ber_KO
bers_SC_test[snr_ind] += ber_SC
blers_KO_test[snr_ind] += bler_KO
blers_SC_test[snr_ind] += bler_SC
# Real-time logging for progress, updating in-place
print(f"SNR: {snr} dB, Sigma: {sigma:.5f}, SC_BER: {bers_SC_test[snr_ind]/batches_processed:.6f}, SC_BLER: {blers_SC_test[snr_ind]/batches_processed:.6f}, KO_BER: {bers_KO_test[snr_ind]/batches_processed:.6f}, KO_BLER: {blers_KO_test[snr_ind]/batches_processed:.6f}, Batches: {batches_processed}", end='\r')
except KeyboardInterrupt:
# print("\nInterrupted by user. Finalizing current SNR...")
pass
# Normalize cumulative metrics by the number of processed batches for accuracy
bers_KO_test[snr_ind] /= (batches_processed + 0.00000001)
bers_SC_test[snr_ind] /= (batches_processed + 0.00000001)
blers_KO_test[snr_ind] /= (batches_processed + 0.00000001)
blers_SC_test[snr_ind] /= (batches_processed + 0.00000001)
print(f"SNR: {snr} dB, Sigma: {sigma:.5f}, SC_BER: {bers_SC_test[snr_ind]:.6f}, SC_BLER: {blers_SC_test[snr_ind]:.6f}, KO_BER: {bers_KO_test[snr_ind]:.6f}, KO_BLER: {blers_KO_test[snr_ind]:.6f}")
return bers_SC_test, blers_SC_test, bers_KO_test, blers_KO_test
import torch
from utils import moving_average
import matplotlib.pyplot as plt
import os
def save_model(polar, iter, results_save_path, best = False):
torch.save([polar.fnet_dict, polar.gnet_dict, polar.depth_map], os.path.join(results_save_path, 'Models/fnet_gnet_{}.pt'.format(iter)))
if iter > 1:
torch.save([polar.fnet_dict, polar.gnet_dict, polar.depth_map], os.path.join(results_save_path, 'Models/fnet_gnet_{}.pt'.format('final')))
if best:
torch.save([polar.fnet_dict, polar.gnet_dict, polar.depth_map], os.path.join(results_save_path, 'Models/fnet_gnet_{}.pt'.format('best')))
def plot_stuff(bers_enc, losses_enc, bers_dec, losses_dec, results_save_path):
plt.figure()
plt.plot(bers_enc, label = 'BER')
plt.plot(moving_average(bers_enc, n=10), label = 'BER moving avg')
plt.yscale('log')
plt.legend(loc='best')
plt.title('Training BER ENC')
plt.savefig(os.path.join(results_save_path,'training_ber_enc.png'))
plt.close()
plt.figure()
plt.plot(losses_enc, label = 'Losses')
plt.plot(moving_average(losses_enc, n=10), label='Losses moving avg')
plt.yscale('log')
plt.legend(loc='best')
plt.title('Training loss ENC')
plt.savefig(os.path.join(results_save_path ,'training_losses_enc.png'))
plt.close()
plt.figure()
plt.plot(bers_dec, label = 'BER')
plt.plot(moving_average(bers_dec, n=10), label = 'BER moving avg')
plt.yscale('log')
plt.legend(loc='best')
plt.title('Training BER DEC')
plt.savefig(os.path.join(results_save_path,'training_ber_dec.png'))
plt.close()
plt.figure()
plt.plot(losses_dec, label = 'Losses')
plt.plot(moving_average(losses_dec, n=10), label='Losses moving avg')
plt.yscale('log')
plt.legend(loc='best')
plt.title('Training loss DEC')
plt.savefig(os.path.join(results_save_path ,'training_losses_dec.png'))
plt.close()
import torch
import torch.nn.functional as F
from torch.distributions import Normal, StudentT
import numpy as np
from itertools import combinations
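# snr_db2sigma: noise standard deviation for a given SNR in dB, assuming
# unit-energy (+/-1) signaling: sigma = 10 ** (-snr_db / 20).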
def snr_db2sigma(train_snr):
return 10**(-train_snr*1.0/20)
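# moving_average: length-n running mean computed via cumulative sums
# (used to smooth the training curves in plot_stuff).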
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def errors_ber(y_true, y_pred, mask=None):
    if mask is None:
        mask = torch.ones(y_true.size(), device=y_true.device)
    y_true = y_true.view(y_true.shape[0], -1, 1)
    y_pred = y_pred.view(y_pred.shape[0], -1, 1)
    mask = mask.view(mask.shape[0], -1, 1)
    bit_errors = (mask * torch.ne(torch.round(y_true), torch.round(y_pred))).float()
    return bit_errors.sum() / torch.sum(mask)
def errors_bler(y_true, y_pred, get_pos = False):
y_true = y_true.view(y_true.shape[0], -1, 1)
y_pred = y_pred.view(y_pred.shape[0], -1, 1)
decoded_bits = torch.round(y_pred).cpu()
X_test = torch.round(y_true).cpu()
tp0 = (abs(decoded_bits-X_test)).view([X_test.shape[0],X_test.shape[1]])
tp0 = tp0.detach().cpu().numpy()
bler_err_rate = sum(np.sum(tp0,axis=1)>0)*1.0/(X_test.shape[0])
if not get_pos:
return bler_err_rate
else:
err_pos = list(np.nonzero((np.sum(tp0,axis=1)>0).astype(int))[0])
return bler_err_rate, err_pos
def corrupt_signal(input_signal, sigma = 1.0, noise_type = 'awgn', vv =5.0, radar_power = 20.0, radar_prob = 0.05):
data_shape = input_signal.shape # input_signal has to be a numpy array.
assert noise_type in ['bsc', 'awgn', 'fading', 'radar', 't-dist', 'isi_perfect', 'isi_uncertain'], "Invalid noise type"
device = input_signal.device
if noise_type == 'awgn':
dist = Normal(torch.tensor([0.0], device = device), torch.tensor([sigma], device = device))
noise = dist.sample(input_signal.shape).squeeze()
corrupted_signal = input_signal + noise
elif noise_type == 'fading':
fading_h = torch.sqrt(torch.randn_like(input_signal)**2 + torch.randn_like(input_signal)**2)/np.sqrt(3.14/2.0)
noise = sigma * torch.randn_like(input_signal) # Define noise
corrupted_signal = fading_h *(input_signal) + noise
elif noise_type == 'radar':
add_pos = np.random.choice([0.0, 1.0], data_shape,
p=[1 - radar_prob, radar_prob])
corrupted_signal = radar_power* np.random.standard_normal( size = data_shape ) * add_pos
noise = sigma * torch.randn_like(input_signal) +\
torch.from_numpy(corrupted_signal).float().to(input_signal.device)
corrupted_signal = input_signal + noise
    elif noise_type == 't-dist':
        dist = StudentT(torch.tensor([vv], device = device))
        noise = sigma * dist.sample(input_signal.shape).squeeze()
        corrupted_signal = input_signal + noise
    else:
        raise NotImplementedError(f"Noise type '{noise_type}' is not implemented here")
    return corrupted_signal
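# LLR combination ("boxplus") used in successive-cancellation decoding:
# min_sum_log_sum_exp is the min-sum approximation sign(x)*sign(y)*min(|x|, |y|),
# and log_sum_exp below computes the exact value log((1 + e^(x+y)) / (e^x + e^y)).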
def min_sum_log_sum_exp(x, y):
log_sum_ms = torch.min(torch.abs(x), torch.abs(y))*torch.sign(x)*torch.sign(y)
return log_sum_ms
def min_sum_log_sum_exp_4(x_1, x_2, x_3, x_4):
return min_sum_log_sum_exp(min_sum_log_sum_exp(x_1, x_2), min_sum_log_sum_exp(x_3, x_4))
def log_sum_exp(x, y):
def log_sum_exp_(LLR_vector):
sum_vector = LLR_vector.sum(dim=1, keepdim=True)
sum_concat = torch.cat([sum_vector, torch.zeros_like(sum_vector)], dim=1)
return torch.logsumexp(sum_concat, dim=1)- torch.logsumexp(LLR_vector, dim=1)
Lv = log_sum_exp_(torch.cat([x.unsqueeze(2), y.unsqueeze(2)], dim=2).permute(0, 2, 1))
return Lv
def dec2bitarray(in_number, bit_width):
"""
Converts a positive integer to NumPy array of the specified size containing
bits (0 and 1).
Parameters
----------
in_number : int
Positive integer to be converted to a bit array.
bit_width : int
Size of the output bit array.
Returns
-------
bitarray : 1D ndarray of ints
Array containing the binary representation of the input decimal.
"""
binary_string = bin(in_number)
length = len(binary_string)
bitarray = np.zeros(bit_width, 'int')
for i in range(length-2):
bitarray[bit_width-i-1] = int(binary_string[length-i-1])
return bitarray
def countSetBits(n):
count = 0
while (n):
n &= (n-1)
count+= 1
return count
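# STEQuantize: straight-through estimator for quantizing encoder outputs.
# The forward pass clamps the inputs and quantizes them (sign for 2 levels,
# uniform rounding otherwise); the backward pass copies the incoming gradient,
# zeroing it where the input exceeded the clamp and/or clipping its magnitude.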
class STEQuantize(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, enc_quantize_level = 2, enc_value_limit = 1.0, enc_grad_limit = 0.01, enc_clipping = 'both'):
ctx.save_for_backward(inputs)
assert enc_clipping in ['both', 'inputs']
ctx.enc_clipping = enc_clipping
ctx.enc_value_limit = enc_value_limit
ctx.enc_quantize_level = enc_quantize_level
ctx.enc_grad_limit = enc_grad_limit
x_lim_abs = enc_value_limit
x_lim_range = 2.0 * x_lim_abs
x_input_norm = torch.clamp(inputs, -x_lim_abs, x_lim_abs)
if enc_quantize_level == 2:
outputs_int = torch.sign(x_input_norm)
else:
outputs_int = torch.round((x_input_norm +x_lim_abs) * ((enc_quantize_level - 1.0)/x_lim_range)) * x_lim_range/(enc_quantize_level - 1.0) - x_lim_abs
return outputs_int
@staticmethod
def backward(ctx, grad_output):
if ctx.enc_clipping in ['inputs', 'both']:
input, = ctx.saved_tensors
grad_output[input>ctx.enc_value_limit]=0
grad_output[input<-ctx.enc_value_limit]=0
if ctx.enc_clipping in ['gradient', 'both']:
grad_output = torch.clamp(grad_output, -ctx.enc_grad_limit, ctx.enc_grad_limit)
grad_input = grad_output.clone()
return grad_input, None
def pairwise_distances(codebook):
dists = []
for row1, row2 in combinations(codebook, 2):
distance = (row1-row2).pow(2).sum()
dists.append(np.sqrt(distance.item()))
return dists, np.min(dists)